COMBINATORIAL_BLAS 1.6
 
scc.cpp
Go to the documentation of this file.
1/*
2//@HEADER
3// *****************************************************************************
4//
5// HPCGraph: Graph Computation on High Performance Computing Systems
6// Copyright (2016) Sandia Corporation
7//
8// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9// the U.S. Government retains certain rights in this software.
10//
11// Redistribution and use in source and binary forms, with or without
12// modification, are permitted provided that the following conditions are
13// met:
14//
15// 1. Redistributions of source code must retain the above copyright
16// notice, this list of conditions and the following disclaimer.
17//
18// 2. Redistributions in binary form must reproduce the above copyright
19// notice, this list of conditions and the following disclaimer in the
20// documentation and/or other materials provided with the distribution.
21//
22// 3. Neither the name of the Corporation nor the names of the
23// contributors may be used to endorse or promote products derived from
24// this software without specific prior written permission.
25//
26// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37//
38// Questions? Contact George M. Slota (gmslota@sandia.gov)
39// Siva Rajamanickam (srajama@sandia.gov)
40// Kamesh Madduri (madduri@cse.psu.edu)
41//
42// *****************************************************************************
43//@HEADER
44*/
45
46
47#include <mpi.h>
48#include <omp.h>
49#include <stdio.h>
50#include <stdlib.h>
51#include <stdint.h>
52#include <fstream>
53
54#include "dist_graph.h"
55#include "comms.h"
56#include "util.h"
57
58#define SCC_NOT_VISITED 18446744073709551615
59#define SCC_VISITED_FW 18446744073709551614
60#define SCC_EXPLORED_FW 18446744073709551613
61#define SCC_VISITED_BW 18446744073709551612
62#define SCC_EXPLORED_BW 18446744073709551611
63#define SCC_MARKED 18446744073709551610
64
65extern int procid, nprocs;
66extern bool verbose, debug, verify, output;
67
68
69int scc_bfs_fw(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q,
70 uint64_t* scc, uint64_t root)
71{
72 if (debug) { printf("procid %d scc_bfs_fw() start\n", procid); }
73 double elt = 0.0;
74 if (verbose) {
75 MPI_Barrier(MPI_COMM_WORLD);
76 elt = omp_get_wtime();
77 }
78
79 q->queue_size = 0;
80 q->next_size = 0;
81 q->send_size = 0;
82
83 uint64_t root_index = get_value(&g->map, root);
84 if (root_index != NULL_KEY && root_index < g->n_local)
85 {
86 q->queue[0] = root;
87 q->queue_size = 1;
88 }
89
90 uint64_t iter = 0;
91 comm->global_queue_size = 1;
92#pragma omp parallel default(shared)
93{
94 thread_queue_t tq;
95 init_thread_queue(&tq);
96
97#pragma omp for
98 for (uint64_t i = 0; i < g->n_local; ++i)
99 if (out_degree(g, i) == 0 || in_degree(g, i) == 0)
100 scc[i] = g->local_unmap[i];
101 else
102 scc[i] = SCC_NOT_VISITED;
103#pragma omp for
104 for (uint64_t i = g->n_local; i < g->n_total; ++i)
105 scc[i] = SCC_NOT_VISITED;
106
107 while (comm->global_queue_size)
108 {
109 if (debug && tq.tid == 0) {
110 printf("Task: %d scc_bfs_fw() GQ: %lu, TQ: %lu\n",
111 procid, comm->global_queue_size, q->queue_size);
112 }
113
114#pragma omp for schedule(guided) nowait
115 for (uint64_t i = 0; i < q->queue_size; ++i)
116 {
117 uint64_t vert = q->queue[i];
118 uint64_t vert_index = get_value(&g->map, vert);
119 if (scc[vert_index] != SCC_NOT_VISITED &&
120 scc[vert_index] != SCC_VISITED_FW)
121 continue;
122 scc[vert_index] = SCC_EXPLORED_FW;
123
124 uint64_t out_degree = out_degree(g, vert_index);
125 uint64_t* outs = out_vertices(g, vert_index);
126 for (uint64_t j = 0; j < out_degree; ++j)
127 {
128 uint64_t out_index = outs[j];
129 if (scc[out_index] == SCC_NOT_VISITED)
130 {
131 scc[out_index] = SCC_VISITED_FW;
132
133 if (out_index < g->n_local)
134 add_vid_to_queue(&tq, q, g->local_unmap[out_index]);
135 else
136 add_vid_to_send(&tq, q, out_index);
137 }
138 }
139 }
140
141 empty_queue(&tq, q);
142 empty_send(&tq, q);
143#pragma omp barrier
144
145#pragma omp single
146 {
147 exchange_verts(g, comm, q);
148 ++iter;
149 }
150 } // end while
151
152 clear_thread_queue(&tq);
153} // end parallel
154
155 if (verbose) {
156 elt = omp_get_wtime() - elt;
157 printf("Task %d scc_bfs_fw() time %9.6f (s)\n", procid, elt);
158 }
159 if (debug) { printf("Task %d scc_bfs_fw() success\n", procid); }
160
161 return 0;
162}
163
164uint64_t scc_bfs_bw(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q,
165 uint64_t* scc, uint64_t root)
166{
167 if (debug) { printf("procid %d scc_bfs_bw() start\n", procid); }
168 double elt = 0.0;
169 if (verbose) {
170 MPI_Barrier(MPI_COMM_WORLD);
171 elt = omp_get_wtime();
172 }
173
174 for (int32_t i = 0; i < nprocs; ++i)
175 comm->sendcounts_temp[i] = 0;
176
177 q->queue_size = 0;
178 q->next_size = 0;
179 q->send_size = 0;
180 uint64_t num_unassigned = 0;
181
182 uint64_t root_index = get_value(&g->map, root);
183 if (root_index != NULL_KEY && root_index < g->n_local)
184 {
185 q->queue[0] = root;
186 q->queue_size = 1;
187 }
188
189 uint64_t iter = 0;
190 comm->global_queue_size = 1;
191#pragma omp parallel default(shared)
192{
193 thread_queue_t tq;
194 thread_comm_t tc;
195 init_thread_queue(&tq);
196 init_thread_comm(&tc);
197
198 while (comm->global_queue_size)
199 {
200 if (debug && tq.tid == 0) {
201 printf("Task: %d scc_bfs_bw() GQ: %lu, TQ: %lu\n",
202 procid, comm->global_queue_size, q->queue_size);
203 }
204
205#pragma omp for schedule(guided) nowait
206 for (uint64_t i = 0; i < q->queue_size; ++i)
207 {
208 uint64_t vert = q->queue[i];
209 uint64_t vert_index = get_value(&g->map, vert);
210 if (scc[vert_index] != SCC_VISITED_BW &&
211 scc[vert_index] != SCC_EXPLORED_FW)
212 continue;
213 scc[vert_index] = SCC_EXPLORED_BW;
214
215 uint64_t in_degree = in_degree(g, vert_index);
216 uint64_t* ins = in_vertices(g, vert_index);
217 for (uint64_t j = 0; j < in_degree; ++j)
218 {
219 uint64_t in_index = ins[j];
220 if ((in_index < g->n_local && scc[in_index] == SCC_EXPLORED_FW) ||
221 (in_index >= g->n_local && scc[in_index] != SCC_VISITED_BW))
222 {
223 scc[in_index] = SCC_VISITED_BW;
224
225 if (in_index < g->n_local)
226 add_vid_to_queue(&tq, q, g->local_unmap[in_index]);
227 else
228 add_vid_to_send(&tq, q, in_index);
229 }
230 }
231 }
232
233 empty_queue(&tq, q);
234 empty_send(&tq, q);
235#pragma omp barrier
236
237#pragma omp single
238 {
239 exchange_verts(g, comm, q);
240 ++iter;
241 }
242 } // end while
243
244#pragma omp for schedule(guided) reduction(+:num_unassigned) nowait
245 for (uint64_t i = 0; i < g->n_local; ++i)
246 if (scc[i] == SCC_EXPLORED_BW)
247 scc[i] = root;
248 else if (scc[i] == SCC_EXPLORED_FW)
249 {
250 scc[i] = SCC_NOT_VISITED;
251 ++num_unassigned;
252 }
253 else if (scc[i] == SCC_NOT_VISITED)
254 ++num_unassigned;
255
256 for (int32_t i = 0; i < nprocs; ++i)
257 tc.sendcounts_thread[i] = 0;
258
259#pragma omp for schedule(guided) nowait
260 for (uint64_t i = 0; i < g->n_local; ++i)
261 update_sendcounts_thread(g, &tc, i);
262
263 for (int32_t i = 0; i < nprocs; ++i)
264 {
265#pragma omp atomic
266 comm->sendcounts_temp[i] += tc.sendcounts_thread[i];
267
268 tc.sendcounts_thread[i] = 0;
269 }
270#pragma omp barrier
271
272#pragma omp single
273{
274 init_sendbuf_vid_data(comm);
275 init_recvbuf_vid_data(comm);
276}
277
278#pragma omp for schedule(guided) nowait
279 for (uint64_t i = 0; i < g->n_local; ++i)
280 update_vid_data_queues(g, &tc, comm, i, scc[i]);
281
282 empty_vid_data(&tc, comm);
283#pragma omp barrier
284
285#pragma omp single
286{
287 exchange_verts(comm);
288 exchange_data(comm);
289}
290
291#pragma omp for
292 for (uint64_t i = 0; i < comm->total_recv; ++i)
293 {
294 uint64_t vert_index = get_value(&g->map, comm->recvbuf_vert[i]);
295 scc[vert_index] = comm->recvbuf_data[i];
296 }
297
298#pragma omp single
299{
300 clear_allbuf_vid_data(comm);
301}
302
303 clear_thread_queue(&tq);
304 clear_thread_comm(&tc);
305} // end parallel
306
307 MPI_Allreduce(MPI_IN_PLACE, &num_unassigned, 1,
308 MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
309
310 if (verbose) {
311 elt = omp_get_wtime() - elt;
312 printf("Task %d scc_bfs_bw() time %9.6f (s)\n", procid, elt);
313 }
314 if (debug) { printf("Task %d scc_bfs_bw() success\n", procid); }
315
316 return num_unassigned;
317}
318
319
320int scc_color(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q,
321 uint64_t* scc, uint64_t* colors)
322{
323 if (debug) { printf("Task %d scc_color() start\n", procid); }
324 double elt = 0.0;
325 if (verbose) {
326 MPI_Barrier(MPI_COMM_WORLD);
327 elt = omp_get_wtime();
328 }
329
330 q->queue_size = 0;
331 q->next_size = 0;
332 q->send_size = 0;
333
334 for (int32_t i = 0; i < nprocs; ++i)
335 comm->sendcounts_temp[i] = 0;
336
337 uint64_t iter = 0;
338 comm->global_queue_size = 1;
339#pragma omp parallel default(shared)
340{
341 thread_queue_t tq;
342 thread_comm_t tc;
343 init_thread_queue(&tq);
344 init_thread_comm(&tc);
345
346#pragma omp for
347 for (uint64_t i = 0; i < g->n_local; ++i)
348 {
349 if (scc[i] == SCC_NOT_VISITED)
350 {
351 colors[i] = g->local_unmap[i];
352 add_vid_to_queue(&tq, q, i);
353 }
354 else
355 colors[i] = SCC_MARKED;
356 }
357
358#pragma omp for
359 for (uint64_t i = g->n_local; i < g->n_total; ++i)
360 if (scc[i] == SCC_NOT_VISITED)
361 colors[i] = g->ghost_unmap[i - g->n_local];
362 else
363 colors[i] = SCC_MARKED;
364
365#pragma omp for
366 for (uint64_t i = 0; i < g->n_total; ++i)
367 if (colors[i] == SCC_MARKED && scc[i] >= g->n)
368 printf("SCC assignment is %lu, out of bounds for n=%lu\n", scc[i], g->n);
369
370 empty_queue(&tq, q);
371#pragma omp barrier
372
373#pragma omp single
374{
375 q->queue_size = q->next_size;
376 q->next_size = 0;
377
378 uint64_t* temp = q->queue;
379 q->queue = q->queue_next;
380 q->queue_next = temp;
381}
382
383 while (comm->global_queue_size)
384 {
385 if (debug && tq.tid == 0) {
386 printf("Task %d Iter %lu scc_color() GQ: %lu, TQ: %lu\n",
387 procid, iter, comm->global_queue_size, q->queue_size);
388 }
389
390#pragma omp for schedule(guided) nowait
391 for (uint64_t i = 0; i < q->queue_size; ++i)
392 {
393 uint64_t vert_index = q->queue[i];
394 bool send = false;
395
396 uint64_t in_degree = in_degree(g, vert_index);
397 uint64_t* ins = in_vertices(g, vert_index);
398 for (uint64_t j = 0; j < in_degree; ++j)
399 {
400 uint64_t in_index = ins[j];
401 if (colors[in_index] != SCC_MARKED &&
402 colors[in_index] > colors[vert_index])
403 {
404 colors[vert_index] = colors[in_index];
405 send = true;
406 }
407 }
408
409 if (send)
410 add_vid_to_send(&tq, q, vert_index);
411 }
412
413 empty_send(&tq, q);
414#pragma omp barrier
415
416 for (int32_t i = 0; i < nprocs; ++i)
417 tc.sendcounts_thread[i] = 0;
418
419#pragma omp for schedule(guided) nowait
420 for (uint64_t i = 0; i < q->send_size; ++i)
421 {
422 uint64_t vert_index = q->queue_send[i];
423 update_sendcounts_thread(g, &tc, vert_index);
424 }
425
426 for (int32_t i = 0; i < nprocs; ++i)
427 {
428#pragma omp atomic
429 comm->sendcounts_temp[i] += tc.sendcounts_thread[i];
430
431 tc.sendcounts_thread[i] = 0;
432 }
433#pragma omp barrier
434
435#pragma omp single
436{
437 init_sendbuf_vid_data(comm);
438}
439
440#pragma omp for schedule(guided) nowait
441 for (uint64_t i = 0; i < q->send_size; ++i)
442 {
443 uint64_t vert_index = q->queue_send[i];
444 update_vid_data_queues(g, &tc, comm,
445 vert_index, colors[vert_index]);
446 }
447
448 empty_vid_data(&tc, comm);
449#pragma omp barrier
450
451#pragma omp single
452{
453 exchange_vert_data(g, comm, q);
454} // end single
455
456
457#pragma omp for
458 for (uint64_t i = 0; i < comm->total_recv; ++i)
459 {
460 uint64_t vert_index = get_value(&g->map, comm->recvbuf_vert[i]);
461 colors[vert_index] = comm->recvbuf_data[i];
462 }
463
464#pragma omp single
465{
466 clear_allbuf_vid_data(comm);
467 ++iter;
468}
469 }// end while
470
471 clear_thread_queue(&tq);
472 clear_thread_comm(&tc);
473} // end parallel
474
475 if (verbose) {
476 elt = omp_get_wtime() - elt;
477 printf("Task %d, scc_color() time %9.6f (s)\n", procid, elt);
478 }
479 if (debug) { printf("Task %d scc_color() success\n", procid); }
480
481 return 0;
482}
483
484
485
486uint64_t scc_find_sccs(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q,
487 uint64_t* scc, uint64_t* colors)
488{
489 if (debug) { printf("procid %d scc_find_sccs() start\n", procid); }
490 double elt = 0.0;
491 if (verbose) {
492 MPI_Barrier(MPI_COMM_WORLD);
493 elt = omp_get_wtime();
494 }
495
496 q->queue_size = 0;
497 q->next_size = 0;
498 q->send_size = 0;
499 uint64_t num_unassigned = 0;
500
501 uint64_t iter = 0;
502 comm->global_queue_size = 1;
503#pragma omp parallel default(shared)
504{
505 thread_queue_t tq;
506 thread_comm_t tc;
507 init_thread_queue(&tq);
508 init_thread_comm(&tc);
509
510#pragma omp for
511 for (uint64_t i = 0; i < g->n_local; ++i)
512 if (g->local_unmap[i] == colors[i])
513 add_vid_to_queue(&tq, q, g->local_unmap[i]);
514
515 empty_queue(&tq, q);
516#pragma omp barrier
517#pragma omp single
518{
519 q->queue_size = q->next_size;
520 q->next_size = 0;
521
522 uint64_t* temp = q->queue;
523 q->queue = q->queue_next;
524 q->queue_next = temp;
525}
526
527 while (comm->global_queue_size)
528 {
529 if (debug && tq.tid == 0) {
530 printf("Task: %d scc_find_sccs() GQ: %lu, TQ: %lu\n",
531 procid, comm->global_queue_size, q->queue_size);
532 }
533
534#pragma omp for schedule(guided) nowait
535 for (uint64_t i = 0; i < q->queue_size; ++i)
536 {
537 uint64_t vert = q->queue[i];
538 uint64_t vert_index = get_value(&g->map, vert);
539 if (scc[vert_index] != SCC_NOT_VISITED &&
540 scc[vert_index] != SCC_VISITED_FW)
541 continue;
542 scc[vert_index] = colors[vert_index];
543
544 uint64_t in_degree = in_degree(g, vert_index);
545 uint64_t* ins = in_vertices(g, vert_index);
546 for (uint64_t j = 0; j < in_degree; ++j)
547 {
548 uint64_t in_index = ins[j];
549 if (scc[in_index] == SCC_NOT_VISITED &&
550 colors[in_index] == colors[vert_index])
551 {
552 scc[in_index] = SCC_VISITED_FW;
553
554 if (in_index < g->n_local)
555 add_vid_to_queue(&tq, q, g->local_unmap[in_index]);
556 else
557 add_vid_to_send(&tq, q, in_index);
558 }
559 }
560 }
561
562 empty_queue(&tq, q);
563 empty_send(&tq, q);
564#pragma omp barrier
565
566#pragma omp single
567 {
568 exchange_verts(g, comm, q);
569 ++iter;
570 }
571 } // end while
572
573#pragma omp for schedule(guided) reduction(+:num_unassigned) nowait
574 for (uint64_t i = 0; i < g->n_local; ++i)
575 if (scc[i] == SCC_EXPLORED_FW)
576 {
577 scc[i] = SCC_NOT_VISITED;
578 ++num_unassigned;
579 }
580 else if (scc[i] == SCC_NOT_VISITED)
581 ++num_unassigned;
582
583 for (int32_t i = 0; i < nprocs; ++i)
584 tc.sendcounts_thread[i] = 0;
585
586#pragma omp for schedule(guided) nowait
587 for (uint64_t i = 0; i < g->n_local; ++i)
588 update_sendcounts_thread(g, &tc, i);
589
590 for (int32_t i = 0; i < nprocs; ++i)
591 {
592#pragma omp atomic
593 comm->sendcounts_temp[i] += tc.sendcounts_thread[i];
594
595 tc.sendcounts_thread[i] = 0;
596 }
597#pragma omp barrier
598
599#pragma omp single
600{
601 init_sendbuf_vid_data(comm);
602 init_recvbuf_vid_data(comm);
603}
604
605#pragma omp for schedule(guided) nowait
606 for (uint64_t i = 0; i < g->n_local; ++i)
607 update_vid_data_queues(g, &tc, comm, i, scc[i]);
608
609 empty_vid_data(&tc, comm);
610#pragma omp barrier
611
612#pragma omp single
613{
614 exchange_verts(comm);
615 exchange_data(comm);
616}
617
618#pragma omp for
619 for (uint64_t i = 0; i < comm->total_recv; ++i)
620 {
621 uint64_t vert_index = get_value(&g->map, comm->recvbuf_vert[i]);
622 scc[vert_index] = comm->recvbuf_data[i];
623 }
624
625#pragma omp single
626{
627 clear_allbuf_vid_data(comm);
628}
629
630 clear_thread_queue(&tq);
631 clear_thread_comm(&tc);
632} // end parallel
633
634 MPI_Allreduce(MPI_IN_PLACE, &num_unassigned, 1,
635 MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
636
637 if (verbose) {
638 elt = omp_get_wtime() - elt;
639 printf("Task %d scc_find_sccs() time %9.6f (s)\n", procid, elt);
640 }
641 if (debug) { printf("Task %d scc_find_sccs() success\n", procid); }
642
643 return num_unassigned;
644}
645
646
647int scc_verify(dist_graph_t* g, uint64_t* scc)
648{
649 MPI_Barrier(MPI_COMM_WORLD);
650
651 uint64_t* counts = (uint64_t*)malloc(g->n*sizeof(uint64_t));
652 uint64_t unassigned = 0;
653
654 for (uint64_t i = 0; i < g->n; ++i)
655 counts[i] = 0;
656 for (uint64_t i = 0; i < g->n_local; ++i)
657 //if (scc[i] != SCC_NOT_VISITED)
658 if (scc[i] < g->n)
659 ++counts[scc[i]];
660 else
661 ++unassigned;
662
663 MPI_Allreduce(MPI_IN_PLACE, counts, (int32_t)g->n,
664 MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
665 MPI_Allreduce(MPI_IN_PLACE, &unassigned, 1,
666 MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
667
668 uint64_t num_sccs = 0;
669 uint64_t num_trivial = 0;
670 uint64_t max_scc = 0;
671 for (uint64_t i = 0; i < g->n; ++i)
672 if (counts[i])
673 {
674 ++num_sccs;
675 if (counts[i] > max_scc)
676 max_scc = counts[i];
677 if (counts[i] == 1)
678 ++num_trivial;
679 }
680
681 if (procid == 0)
682 printf("Num SCCs: %lu, Max SCC: %lu, Trivial: %lu, Unassigned %lu\n",
683 num_sccs, max_scc, num_trivial, unassigned);
684
685 free(counts);
686
687 return 0;
688}
689
690
691int scc_output(dist_graph_t* g, uint64_t* scc, char* output_file)
692{
693 if (verbose) printf("Task %d scc assignments to %s\n", procid, output_file);
694
695 uint64_t* global_scc = (uint64_t*)malloc(g->n*sizeof(uint64_t));
696
697#pragma omp parallel for
698 for (uint64_t i = 0; i < g->n; ++i)
699 global_scc[i] = SCC_NOT_VISITED;
700
701#pragma omp parallel for
702 for (uint64_t i = 0; i < g->n_local; ++i)
703 global_scc[g->local_unmap[i]] = scc[i];
704
705 if (procid == 0)
706 MPI_Reduce(MPI_IN_PLACE, global_scc, (int32_t)g->n,
707 MPI_UINT64_T, MPI_MIN, 0, MPI_COMM_WORLD);
708 else
709 MPI_Reduce(global_scc, global_scc, (int32_t)g->n,
710 MPI_UINT64_T, MPI_MIN, 0, MPI_COMM_WORLD);
711
712 if (procid == 0)
713 {
714 if (debug)
715 for (uint64_t i = 0; i < g->n; ++i)
716 if (global_scc[i] == SCC_NOT_VISITED)
717 {
718 printf("SCC error: %lu not assigned\n", i);
719 global_scc[i] = 0;
720 }
721
722 std::ofstream outfile;
723 outfile.open(output_file);
724
725 for (uint64_t i = 0; i < g->n; ++i)
726 outfile << global_scc[i] << std::endl;
727
728 outfile.close();
729 }
730
731 free(global_scc);
732
733 if (verbose) printf("Task %d done writing assignments\n", procid);
734
735 return 0;
736}
737
738int scc_dist(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q,
739 uint64_t root, char* output_file)
740{
741 if (debug) { printf("Task %d scc_dist() start\n", procid); }
742
743 MPI_Barrier(MPI_COMM_WORLD);
744 double elt = omp_get_wtime();
745
746 uint64_t* scc = (uint64_t*)malloc(g->n_total*sizeof(uint64_t));
747 uint64_t* colors = (uint64_t*)malloc(g->n_total*sizeof(uint64_t));
748 uint64_t num_unassigned = 0;
749 scc_bfs_fw(g, comm, q, scc, root);
750 num_unassigned = scc_bfs_bw(g, comm, q, scc, root);
751
752 while (num_unassigned)
753 {
754 scc_color(g, comm, q, scc, colors);
755 num_unassigned = scc_find_sccs(g, comm, q, scc, colors);
756 }
757
758 MPI_Barrier(MPI_COMM_WORLD);
759 elt = omp_get_wtime() - elt;
760 if (procid == 0) printf("SCC time %9.6f (s)\n", elt);
761
762 if (output) {
763 scc_output(g, scc, output_file);
764 }
765
766 if (verify) {
767 scc_verify(g, scc);
768 }
769
770 free(scc);
771
772 if (debug) printf("Task %d scc_dist() success\n", procid);
773 return 0;
774}
775
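Reading note (added for orientation; not part of the generated listing): scc_dist() above implements a distributed forward-backward SCC decomposition. Vertices with no incoming or outgoing edges are assigned as trivial SCCs during initialization (lines 98-102), one forward and one backward BFS from the root extract the root's component (lines 749-750), and the remaining vertices are resolved by repeated max-color propagation (scc_color) followed by per-color backward sweeps (scc_find_sccs) until no vertex is left unassigned. The SCC_* markers defined at lines 58-63 are the six largest uint64_t values, so an scc[] entry denotes a finished assignment exactly when it is smaller than the global vertex count g->n, which is the test scc_verify() uses. The standalone sketch below (hypothetical values, not part of scc.cpp) only illustrates that sentinel encoding.

// Standalone illustration of the sentinel encoding used by the SCC_* macros:
// the six markers occupy the top of the uint64_t range, so any scc[] entry
// below the global vertex count n is a real SCC id.
#include <cstdint>
#include <cassert>

int main()
{
  const uint64_t scc_not_visited = 18446744073709551615ULL; // == UINT64_MAX
  const uint64_t scc_marked      = 18446744073709551610ULL; // == UINT64_MAX - 5

  assert(scc_not_visited == UINT64_MAX);
  assert(scc_marked == UINT64_MAX - 5);

  // Mirror of scc_verify()'s test: an entry counts as assigned iff it is a
  // valid global vertex id, i.e. strictly less than n.
  const uint64_t n = 1000;             // hypothetical global vertex count
  const uint64_t assigned_entry = 42;  // e.g. the root id of this vertex's SCC
  assert(assigned_entry < n);          // assigned
  assert(!(scc_not_visited < n));      // sentinel: counted as unassigned
  return 0;
}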
void init_thread_comm(thread_comm_t *tc)
Definition comms.cpp:169
void init_thread_queue(thread_queue_t *tq)
Definition comms.cpp:136
void init_recvbuf_vid_data(mpi_data_t *comm)
Definition comms.cpp:278
void clear_thread_queue(thread_queue_t *tq)
Definition comms.cpp:157
void init_sendbuf_vid_data(mpi_data_t *comm)
Definition comms.cpp:251
void clear_allbuf_vid_data(mpi_data_t *comm)
Definition comms.cpp:370
void clear_thread_comm(thread_comm_t *tc)
Definition comms.cpp:199
void empty_vid_data(thread_comm_t *tc, mpi_data_t *comm)
Definition comms.h:792
void empty_queue(thread_queue_t *tq, queue_data_t *q)
Definition comms.h:730
void add_vid_to_queue(thread_queue_t *tq, queue_data_t *q, uint64_t vertex_id)
Definition comms.h:721
void exchange_verts(dist_graph_t *g, mpi_data_t *comm, queue_data_t *q)
Definition comms.h:184
void exchange_data(mpi_data_t *comm)
Definition comms.h:428
void update_vid_data_queues(dist_graph_t *g, thread_comm_t *tc, mpi_data_t *comm, uint64_t vert_index, uint64_t data)
Definition comms.h:628
void add_vid_to_send(thread_queue_t *tq, queue_data_t *q, uint64_t vertex_id)
Definition comms.h:743
void exchange_vert_data(dist_graph_t *g, mpi_data_t *comm, queue_data_t *q)
Definition comms.h:258
void empty_send(thread_queue_t *tq, queue_data_t *q)
Definition comms.h:752
void update_sendcounts_thread(dist_graph_t *g, thread_comm_t *tc, uint64_t vert_index)
Definition comms.h:563
#define in_degree(g, n)
Definition dist_graph.h:55
#define out_vertices(g, n)
Definition dist_graph.h:56
#define out_degree(g, n)
Definition dist_graph.h:54
#define in_vertices(g, n)
Definition dist_graph.h:57
#define NULL_KEY
Definition fast_map.h:58
uint64_t get_value(fast_map *map, uint64_t key)
Definition fast_map.h:132
int procid
Definition main.cpp:55
#define SCC_VISITED_FW
Definition scc.cpp:59
uint64_t scc_bfs_bw(dist_graph_t *g, mpi_data_t *comm, queue_data_t *q, uint64_t *scc, uint64_t root)
Definition scc.cpp:164
bool debug
Definition scc.cpp:66
#define SCC_MARKED
Definition scc.cpp:63
int scc_dist(dist_graph_t *g, mpi_data_t *comm, queue_data_t *q, uint64_t root, char *output_file)
Definition scc.cpp:738
int scc_verify(dist_graph_t *g, uint64_t *scc)
Definition scc.cpp:647
bool verify
Definition scc.cpp:66
#define SCC_NOT_VISITED
Definition scc.cpp:58
int scc_bfs_fw(dist_graph_t *g, mpi_data_t *comm, queue_data_t *q, uint64_t *scc, uint64_t root)
Definition scc.cpp:69
bool verbose
Definition main.cpp:56
#define SCC_EXPLORED_BW
Definition scc.cpp:62
bool output
Definition scc.cpp:66
uint64_t scc_find_sccs(dist_graph_t *g, mpi_data_t *comm, queue_data_t *q, uint64_t *scc, uint64_t *colors)
Definition scc.cpp:486
int nprocs
Definition scc.cpp:65
#define SCC_VISITED_BW
Definition scc.cpp:61
#define SCC_EXPLORED_FW
Definition scc.cpp:60
int scc_color(dist_graph_t *g, mpi_data_t *comm, queue_data_t *q, uint64_t *scc, uint64_t *colors)
Definition scc.cpp:320
int scc_output(dist_graph_t *g, uint64_t *scc, char *output_file)
Definition scc.cpp:691
signed int int32_t
Definition stdint.h:77
unsigned __int64 uint64_t
Definition stdint.h:90
uint64_t n_total
Definition dist_graph.h:69
uint64_t * local_unmap
Definition dist_graph.h:80
uint64_t * ghost_unmap
Definition dist_graph.h:81
uint64_t n_local
Definition dist_graph.h:66
uint64_t n
Definition dist_graph.h:61
fast_map map
Definition dist_graph.h:83
uint64_t * sendcounts_temp
Definition comms.h:73
uint64_t * recvbuf_data
Definition comms.h:82
uint64_t total_recv
Definition comms.h:85
uint64_t global_queue_size
Definition comms.h:87
uint64_t * recvbuf_vert
Definition comms.h:81
uint64_t * sendcounts_thread
Definition comms.h:111
int32_t tid
Definition comms.h:101
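For reference, a minimal hypothetical driver fragment showing the call shape of the entry point scc_dist() (scc.cpp:738). Only the scc_dist declaration is taken from this file; the wrapper name run_scc and the root/output values are placeholders, and constructing the dist_graph_t, mpi_data_t, and queue_data_t objects (done by HPCGraph's own loader and setup code, not shown in this listing) is assumed to have happened before the call.

// Hypothetical usage sketch -- not part of scc.cpp. Assumes g, comm, and q
// were already built and initialized by the surrounding HPCGraph driver code.
#include <cstdint>
#include "dist_graph.h"
#include "comms.h"

int scc_dist(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q,
             uint64_t root, char* output_file); // defined above at scc.cpp:738

void run_scc(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q)
{
  uint64_t root = 0;              // global id of the vertex the FW/BW BFS starts from
  char out_file[] = "scc.out";    // per-vertex SCC ids are written here when output==true
  scc_dist(g, comm, q, root, out_file);
}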