35#ifndef GRAPH_GENERATOR_SEQ
36#define GRAPH_GENERATOR_SEQ
39#include "graph500/generator/graph_generator.h"
40#include "graph500/generator/utils.h"
// Fragment of the default DistEdgeList constructor (extraction dropped the
// signature/braces). Builds the 2D processor grid over the world communicator;
// (0, 0) asks CommGrid to choose the grid dimensions itself — TODO confirm
// against CommGrid's constructor contract.
51 commGrid.reset(
		new CommGrid(MPI_COMM_WORLD, 0, 0));
// Construct an empty distributed edge list over a caller-supplied communicator.
// All storage pointers start NULL and counts start at zero; the only work is
// building the processor grid on `myWorld` ((0,0) = auto-chosen grid dims).
55DistEdgeList<IT>::DistEdgeList(MPI_Comm & myWorld): edges(NULL), pedges(NULL), nedges(0), globalV(0)
57 commGrid.reset(
		new CommGrid(myWorld, 0, 0));
// Construct a distributed edge list by reading a binary edge file in parallel.
//   filename — path to a binary file of 32-bit vertex ids, two per edge
//              (8 bytes/edge, inferred from the `8 * globalm` offset math below)
//   globaln  — global vertex count (stored into globalV)
//   globalm  — global edge count, used to partition the file across ranks
// NOTE(review): several lines are missing from this extraction (braces, the
// existence check that the "File exists"/"File does not exist" messages imply,
// the computation of nedges, and the allocation of edges/gen_edges) — the
// comments below only describe what is visible.
61DistEdgeList<IT>::DistEdgeList(
		const char * filename,
		IT globaln,
		IT globalm): edges(NULL), pedges(NULL), globalV(globaln)
63 commGrid.reset(
		new CommGrid(MPI_COMM_WORLD, 0, 0));
65 int nprocs = commGrid->GetSize();
66 int rank = commGrid->GetRank();
69 FILE * infp = fopen(filename,
		"rb");
// Byte range this rank reads: an equal share of globalm edges at 8 bytes each
// (two uint32 endpoints per edge). Integer division means the last rank must
// absorb the remainder — the `read_offset_end = 8*globalm` line below is
// presumably inside an `if` selecting the last rank (condition lost to
// extraction; TODO confirm).
71 IT read_offset_start, read_offset_end;
72 read_offset_start =
		rank * 8 * (globalm /
		nprocs);
73 read_offset_end = (
		rank+1) * 8 * (globalm /
		nprocs);
75 read_offset_end = 8*globalm;
// Per-rank debug log for tracing the parallel read.
79 commGrid->OpenDebugFile(
		"BinRead", oput);
82 oput <<
		"File exists" << std::endl;
83 oput <<
		"Trying to read " << nedges <<
		" edges out of " << globalm << std::endl;
87 oput <<
		"File does not exist" << std::endl;
// Seek to this rank's slice and bulk-read it as raw uint32 words.
// NOTE(review): fread's conventional argument order is (ptr, size, count);
// here size=2*nedges and count=sizeof(uint32_t). The total byte count is the
// same, but the return value then counts sizeof(uint32_t)-sized items, which
// would defeat any short-read check — confirm intent.
// NOTE(review): no visible check that fopen succeeded before fseek/fread —
// may be in the lines lost to extraction; verify.
92 fseek(infp, read_offset_start, SEEK_SET);
93 fread(gen_edges, 2*nedges,
		sizeof(
		uint32_t), infp);
95 oput <<
		"Freads done " << std::endl;
// Widen each 32-bit endpoint into the IT-typed edges array
// (edges is laid out as [src0, dst0, src1, dst1, ...]).
96 for(
		IT i=0; i< 2*nedges; ++i)
97 edges[i] = (
		IT) gen_edges[i];
98 oput <<
		"Puts done " << std::endl;
// Collectively dump the local edge arrays to a single binary file of IT-sized
// words using MPI-IO. Each rank writes its 2*nedges words at an offset equal
// to the sum of all lower ranks' lengths (computed via Allgather + prefix sum).
// NOTE(review): declarations of rank/nprocs/thefile/prelens are missing from
// this extraction.
108template <
		typename IT>
109void DistEdgeList<IT>::Dump64bit(
		string filename)
112 MPI_Comm World = commGrid->GetWorld();
113 MPI_Comm_rank(World, &
		rank);
114 MPI_Comm_size(World, &
		nprocs);
116 MPI_File_open(World, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &thefile);
// Gather every rank's element count, then sum the counts of all ranks below
// this one to get this rank's write offset (in elements).
119 prelens[
		rank] = 2*nedges;
120 MPI_Allgather(MPI_IN_PLACE, 0, MPIType<IT>(), prelens, 1, MPIType<IT>(), commGrid->GetWorld());
// NOTE(review): the accumulate init value is a plain int 0, so the sum is
// computed in int — can truncate/overflow for large edge counts when IT is
// 64-bit. Dump32bit below uses static_cast<IT>(0); this should match.
121 IT lengthuntil = accumulate(prelens, prelens+
		rank, 0);
125 MPI_File_set_view(thefile,
		int64_t(lengthuntil *
		sizeof(
		IT)), MPIType<IT>(), MPIType<IT>(),
		"native", MPI_INFO_NULL);
// NOTE(review): the status argument should be MPI_STATUS_IGNORE rather than
// NULL — NULL is not a portable way to ignore the status per the MPI standard.
126 MPI_File_write(thefile, edges, prelens[
		rank], MPIType<IT>(), NULL);
127 MPI_File_close(&thefile);
// Collectively dump the edge list as unsigned 32-bit words using MPI-IO,
// narrowing each IT endpoint to 32 bits. Same offset scheme as Dump64bit:
// Allgather the per-rank lengths, prefix-sum to find this rank's offset.
// NOTE(review): declarations of rank/nprocs/thefile/prelens/gen_edges and the
// loop body that fills gen_edges from edges are missing from this extraction.
131template <
		typename IT>
132void DistEdgeList<IT>::Dump32bit(
		string filename)
135 MPI_Comm World = commGrid->GetWorld();
136 MPI_Comm_rank(World, &
		rank);
137 MPI_Comm_size(World, &
		nprocs);
139 MPI_File_open(World, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &thefile);
142 prelens[
		rank] = 2*nedges;
143 MPI_Allgather(MPI_IN_PLACE, 0, MPIType<IT>(), prelens, 1, MPIType<IT>(), commGrid->GetWorld());
// Accumulate in IT (note: unlike Dump64bit, the init value is correctly
// cast to IT here, avoiding int truncation).
144 IT lengthuntil = accumulate(prelens, prelens+
		rank,
		static_cast<IT>(0));
148 MPI_File_set_view(thefile,
		int64_t(lengthuntil *
		sizeof(
		uint32_t)), MPI_UNSIGNED, MPI_UNSIGNED,
		"native", MPI_INFO_NULL);
// Narrow each IT endpoint into the 32-bit staging buffer (loop body lost to
// extraction — presumably gen_edges[i] = (uint32_t) edges[i]; TODO confirm).
150 for(
		IT i=0; i< prelens[
		rank]; ++i)
// NOTE(review): status argument should be MPI_STATUS_IGNORE rather than NULL
// (see Dump64bit).
153 MPI_File_write(thefile, gen_edges, prelens[
		rank], MPI_UNSIGNED, NULL);
154 MPI_File_close(&thefile);
// Destructor: release the unpacked and packed edge arrays if allocated.
// NOTE(review): pedges is filled by RefGen21::make_graph (see GenGraph500Data);
// if that C generator allocates with malloc, delete[] here is a mismatched
// deallocation — confirm against the generator's allocation strategy.
161template <
		typename IT>
162DistEdgeList<IT>::~DistEdgeList()
164 if(edges)
		delete [] edges;
165 if(pedges)
		delete [] pedges;
// Reserve storage for up to `ne` local edges (two IT slots per edge).
// NOTE(review): the lines computing memedges from ne (and any freeing of a
// previous allocation) are missing from this extraction — verify there is no
// leak when called on an already-allocated list.
169template <
		typename IT>
170void DistEdgeList<IT>::SetMemSize(
		IT ne)
182 edges =
		new IT[2*memedges];
// Compact the edge array in place by removing edges whose source slot is the
// -1 sentinel. Strategy: pop trailing sentinels, then scan the interior and
// fill each hole with the current last edge (marking the vacated tail slot -1
// so the trailing pop can reclaim it). The loop bodies decrementing nedges are
// missing from this extraction.
189template <
		typename IT>
190void DistEdgeList<IT>::CleanupEmpties()
// Drop deleted edges off the tail so the "last edge" used below is valid.
194 while (nedges > 0 && edges[2*(nedges-1) + 0] == -1)
// Interior scan: move the last (valid) edge into each sentinel hole.
200 for (
		IT i = 0; i < (nedges-1); i++)
202 if (edges[2*i + 0] == -1)
206 edges[2*i + 0] = edges[2*(nedges-1) + 0];
207 edges[2*i + 1] = edges[2*(nedges-1) + 1];
208 edges[2*(nedges-1) + 0] = -1;
// Re-trim the tail: the swap above may have marked the last slot deleted.
210 while (nedges > 0 && edges[2*(nedges-1) + 0] == -1)
// Generate a Graph500 R-MAT edge list distributed across the communicator.
//   initiator    — the 4 R-MAT partition probabilities (a,b,c,d)
//   log_numverts — log2 of the global vertex count
//   edgefactor   — edges per vertex (used in lines lost to extraction)
//   scramble     — whether to permute vertex labels via RefGen21::scramble
//   packed       — use the packed reference generator (always scrambles)
// NOTE(review): large portions (the non-packed generation path, seed setup,
// braces) are missing from this extraction.
222template <
		typename IT>
223void DistEdgeList<IT>::GenGraph500Data(
		double initiator[4],
		int log_numverts,
		int edgefactor,
		bool scramble,
		bool packed)
// The packed reference generator scrambles unconditionally; warn if the caller
// asked for packed output without scrambling.
225 if(packed && (!scramble))
227 SpParHelper::Print(
		"WARNING: Packed version does always generate scrambled vertex identifiers\n");
// Global vertex count = 2^log_numverts, computed in 64-bit to avoid shifting
// past int's width.
230 globalV = ((
		int64_t)1)<< log_numverts;
// Packed path: delegate to the Graph500 reference generator, which fills
// pedges with packed_edge records and sets nedges.
235 RefGen21::make_graph (log_numverts, globaledges, &nedges, (
		packed_edge**)(&pedges), commGrid->GetWorld());
// Scramble path: derive the two scramble constants once and broadcast from
// rank 0 so every rank applies the identical label permutation.
258 RefGen21::MakeScrambleValues(val0, val1, seed);
260 MPI_Bcast(&val0, 1, MPIType<uint64_t>(),0, commGrid->GetWorld());
261 MPI_Bcast(&val1, 1, MPIType<uint64_t>(),0, commGrid->GetWorld());
// Even split of the global edge count; remainder handling, if any, is in
// lines lost to extraction — TODO confirm.
264 nedges = globaledges/
		size;
267 for (
		IT i = 0; i < nedges; i++)
// Apply the label scramble to both endpoints of every local edge.
273 for(
		IT i=0; i < nedges; ++i)
275 edges[2*i+0] = RefGen21::scramble(edges[2*i+0], log_numverts, val0, val1);
276 edges[2*i+1] = RefGen21::scramble(edges[2*i+1], log_numverts, val0, val1);
// Randomly permute the distributed edge list (signature lost to extraction;
// matches the declaration `void PermEdges(DistEdgeList<IT> &DEL)` below).
// Works in `stages` passes over slices of the edge array to bound the memory
// of the parallel sort: each edge is tagged with a random double key and the
// (key, edge) pairs are globally sorted, which permutes the edges uniformly.
// NOTE(review): declarations of stages / M (the RNG) and the vecpair/dist
// cleanup are missing from this extraction.
293template <
		typename IT>
296 IT maxedges = DEL.memedges;
302 IT perstage = maxedges / stages;
304 int nproc =(DEL.commGrid)->GetSize();
305 int rank = (DEL.commGrid)->GetRank();
306 IT * dist =
		new IT[nproc];
313 for(
		IT s=0; s< stages; ++s)
316 SpParHelper::Print(
		"PermEdges stage starting\n");
317 double st = MPI_Wtime();
// This stage covers edges [n_sofar, n_sofar + n_thisstage); the final stage
// absorbs the division remainder.
320 IT n_sofar = s*perstage;
321 IT n_thisstage = ((s==(stages-1))? (maxedges - n_sofar): perstage);
323 std::pair<double, std::pair<IT,IT> >* vecpair =
		new std::pair<double, std::pair<IT,IT> >[n_thisstage];
// Publish every rank's slice length so the parallel sort knows the
// distribution.
324 dist[
		rank] = n_thisstage;
325 MPI_Allgather(MPI_IN_PLACE, 1, MPIType<IT>(), dist, 1, MPIType<IT>(), DEL.commGrid->GetWorld());
// Tag each edge in the slice with a random sort key.
327 for (
		IT i = 0; i < n_thisstage; i++)
329 vecpair[i].first = M.
		rand();
330 vecpair[i].second.first = DEL.edges[2*(i+n_sofar)];
331 vecpair[i].second.second = DEL.edges[2*(i+n_sofar)+1];
// Global sort by random key = uniform random permutation of the slice.
335 SpParHelper::MemoryEfficientPSort(vecpair, n_thisstage, dist, DEL.commGrid->GetWorld());
// Write the permuted edges back into place.
337 for (
		IT i = 0; i < n_thisstage; i++)
339 DEL.edges[2*(i+n_sofar)] = vecpair[i].second.first;
340 DEL.edges[2*(i+n_sofar)+1] = vecpair[i].second.second;
344 double et = MPI_Wtime();
345 std::ostringstream timeinfo;
346 timeinfo <<
		"Stage " << s <<
		" in " << et-st <<
		" seconds" << std::endl;
347 SpParHelper::Print(timeinfo.str());
// Relabel every vertex id in the edge list through a global random permutation
// (signature lost to extraction; matches the declaration
// `void RenameVertices(DistEdgeList<IU> &DEL)` below). Each round, one
// processor broadcasts its slice of the permutation and every rank renames the
// edge endpoints that fall inside that slice's index range. The `renamed`
// flags guarantee an endpoint is mapped exactly once even if its new label
// lands in a later round's range.
// NOTE(review): declarations of nprocs/permsize/startInd, the round==rank
// branch structure, the per-round delete[] of localPerm, and the setting of
// renamed[j] are missing from this extraction.
363template <
		typename IU>
367 int rank = DEL.commGrid->GetRank();
368 MPI_Comm World = DEL.commGrid->GetWorld();
// Build a uniformly random permutation of [0, globalV), distributed across
// ranks.
371 FullyDistVec<IU, IU> globalPerm(DEL.commGrid);
372 globalPerm.iota(DEL.getGlobalV(), 0);
373 globalPerm.RandPerm();
374 IU locrows = globalPerm.MyLocLength();
// One flag per endpoint slot (2 per edge) so each id is renamed only once.
377 IU locedgelist = 2*DEL.getNumLocalEdges();
378 bool* renamed =
		new bool[locedgelist];
379 std::fill_n(renamed, locedgelist, 0);
382 IU * localPerm = NULL;
// Round-robin over processors: the round'th rank supplies its permutation
// slice; everyone else receives it via broadcast.
394 for (
		int round = 0; round <
		nprocs; round++)
// Broadcaster's branch: copy the local slice of the permutation out of the
// distributed vector.
400 localPerm =
		new IU[permsize];
401 std::copy(globalPerm.arr.begin(), globalPerm.arr.end(), localPerm);
403 MPI_Bcast(&permsize, 1, MPIType<IU>(), round, World);
// Receivers' branch: size the buffer from the broadcast length.
406 localPerm =
		new IU[permsize];
408 MPI_Bcast(localPerm, permsize, MPIType<IU>(), round, World);
// Rename every not-yet-renamed endpoint whose id falls in this round's
// global index range [startInd, startInd + permsize).
411 for (
		typename std::vector<IU>::size_type j = 0; j < (unsigned)locedgelist ; j++)
414 if (startInd <= DEL.edges[j] && DEL.edges[j] < (startInd + permsize) && !renamed[j])
416 DEL.edges[j] = localPerm[DEL.edges[j]-startInd];
420 startInd += permsize;
std::shared_ptr< CommGrid > commGrid
void generate_kronecker(int rank, int size, const uint_fast32_t seed[5], int logN, int64_t M, const double initiator[], int64_t *const edges)
void make_mrg_seed(uint64_t userseed1, uint64_t userseed2, uint_fast32_t *seed)
void PermEdges(DistEdgeList< IT > &DEL)
void RenameVertices(DistEdgeList< IU > &DEL)
unsigned __int64 uint64_t