27 #ifndef _SYNC_STRUCT_MACROS_
28 #define _SYNC_STRUCT_MACROS_
157 #ifdef GALOIS_ENABLE_GPU
// Gluon sync structure generator: `struct EdgeAddReduce`, an "add" reduction
// over per-edge data. GPU-enabled variant: every hook branches on the global
// `personality` — GPU_CUDA dispatches to the *_edge_cuda device helpers,
// otherwise the host (CPU) path runs (guarded by assert(personality == CPU)).
// The *_batch hooks perform whole-partition transfers; returning true tells
// the runtime the batched path was taken.
// NOTE(review): this extract is lossy — the embedded original line numbers
// jump, so return statements and closing braces of several members are not
// visible here. Compare against the complete header before editing.
// NOTE(review): `setVal` (orig. line 258) passes (ValTy)0 to set_edge_cuda
// instead of `y` — looks like a copy/paste bug from `reset`; confirm upstream.
158 #define GALOIS_SYNC_STRUCTURE_ADD_EDGES(fieldtype) \
159 struct EdgeAddReduce { \
160 using ValTy = fieldtype; \
162 static ValTy extract(uint64_t edgeID, ValTy& edgeData) { \
163 if (personality == GPU_CUDA) \
164 return get_edge_cuda(cuda_ctx, edgeID); \
165 assert(personality == CPU); \
169 static bool extract_batch(unsigned from_id, uint8_t* y, size_t* s, \
170 DataCommMode* data_mode) { \
171 if (personality == GPU_CUDA) { \
172 batch_get_edge_cuda(cuda_ctx, from_id, y, s, data_mode); \
175 assert(personality == CPU); \
179 static bool extract_batch(unsigned from_id, uint8_t* y) { \
180 if (personality == GPU_CUDA) { \
181 batch_get_edge_cuda(cuda_ctx, from_id, y); \
184 assert(personality == CPU); \
188 static bool extract_reset_batch(unsigned from_id, uint8_t* y, size_t* s, \
189 DataCommMode* data_mode) { \
190 if (personality == GPU_CUDA) { \
191 batch_get_reset_edge_cuda(cuda_ctx, from_id, y, s, data_mode, \
195 assert(personality == CPU); \
199 static bool extract_reset_batch(unsigned from_id, uint8_t* y) { \
200 if (personality == GPU_CUDA) { \
201 batch_get_reset_edge_cuda(cuda_ctx, from_id, y, (ValTy)0); \
204 assert(personality == CPU); \
208 static bool reduce(uint64_t edgeID, ValTy& edgeData, ValTy y) { \
209 if (personality == GPU_CUDA) { \
210 add_edge_cuda(cuda_ctx, edgeID, y); \
213 assert(personality == CPU); \
218 static bool reduce_batch(unsigned from_id, uint8_t* y, \
219 DataCommMode data_mode) { \
220 if (personality == GPU_CUDA) { \
221 batch_add_edge_cuda(cuda_ctx, from_id, y, data_mode); \
224 assert(personality == CPU); \
228 static bool reduce_mirror_batch(unsigned from_id, uint8_t* y, \
229 DataCommMode data_mode) { \
230 if (personality == GPU_CUDA) { \
231 batch_add_mirror_edge_cuda(cuda_ctx, from_id, y, data_mode); \
234 assert(personality == CPU); \
238 static void reset(uint64_t edgeID, ValTy& edgeData) { \
239 if (personality == GPU_CUDA) { \
240 set_edge_cuda(cuda_ctx, edgeID, (ValTy)0); \
242 assert(personality == CPU); \
246 static bool reset_batch(size_t GALOIS_UNUSED(begin), \
247 size_t GALOIS_UNUSED(end)) { \
248 if (personality == GPU_CUDA) { \
249 batch_reset_edge_cuda(cuda_ctx, begin, end, (ValTy)0); \
252 assert(personality == CPU); \
256 static void setVal(uint64_t edgeID, ValTy& edgeData, ValTy y) { \
257 if (personality == GPU_CUDA) { \
258 set_edge_cuda(cuda_ctx, edgeID, (ValTy)0); \
260 assert(personality == CPU); \
264 static bool setVal_batch(unsigned from_id, uint8_t* y, \
265 DataCommMode data_mode) { \
266 if (personality == GPU_CUDA) { \
267 batch_set_mirror_edge_cuda(cuda_ctx, from_id, y, data_mode); \
270 assert(personality == CPU); \
// Host-only fallback of GALOIS_SYNC_STRUCTURE_ADD_EDGES (built without
// GALOIS_ENABLE_GPU): identical interface, no `personality` dispatch. The
// visible *_batch hooks return false, telling the Gluon runtime that batched
// communication is unavailable so it falls back to the per-element hooks.
// NOTE(review): this extract is lossy (original line numbers jump) — several
// member bodies and the surrounding #else/#endif are not visible here.
275 #define GALOIS_SYNC_STRUCTURE_ADD_EDGES(fieldtype) \
276 struct EdgeAddReduce { \
277 using ValTy = fieldtype; \
279 static ValTy extract(uint64_t edgeID, ValTy& edgeData) { \
283 static bool extract_batch(unsigned, uint8_t*, size_t*, DataCommMode*) { \
287 static bool extract_batch(unsigned, uint8_t*) { return false; } \
289 static bool extract_reset_batch(unsigned, uint8_t*, size_t*, \
294 static bool extract_reset_batch(unsigned, uint8_t*) { return false; } \
296 static bool reduce(uint64_t edgeID, ValTy& edgeData, ValTy y) { \
301 static bool reduce_batch(unsigned, uint8_t*, DataCommMode) { \
305 static bool reduce_mirror_batch(unsigned, uint8_t*, DataCommMode) { \
309 static void reset(uint64_t edgeID, ValTy& edgeData) { edgeData = 0; } \
311 static void setVal(uint64_t edgeID, ValTy& edgeData, ValTy y) { \
315 static bool setVal_batch(unsigned, uint8_t*, DataCommMode) { \
331 #ifdef GALOIS_ENABLE_GPU
// Generates `struct Bitset_edges`, the dirty-bitset wrapper used by Gluon to
// know which edges changed since the last sync. GPU variant: `get()` first
// copies the device-side bitset into the host `bitset_edges` (via
// get_bitset_edge_cuda writing into the DynamicBitSet's backing vector),
// then returns the host bitset; `reset_range` dispatches on `personality`.
// NOTE(review): here `is_valid()` is not constexpr while the CPU variant
// (orig. line 360) declares it `static constexpr` — inconsistent; confirm
// which is intended upstream.
// NOTE(review): lossy extract — closing braces of `get`/`reset_range` and
// the struct terminator are not visible here.
333 #define GALOIS_SYNC_STRUCTURE_BITSET_EDGES \
334 struct Bitset_edges { \
335 static constexpr bool is_vector_bitset() { return false; } \
336 static bool is_valid() { return true; } \
338 static galois::DynamicBitSet& get() { \
339 if (personality == GPU_CUDA) \
340 get_bitset_edge_cuda(cuda_ctx, \
341 (uint64_t*)bitset_edges.get_vec().data()); \
342 return bitset_edges; \
345 static void reset_range(size_t begin, size_t end) { \
346 if (personality == GPU_CUDA) { \
347 bitset_edge_reset_cuda(cuda_ctx, begin, end); \
349 assert(personality == CPU); \
350 bitset_edges.reset(begin, end); \
// Host-only fallback of GALOIS_SYNC_STRUCTURE_BITSET_EDGES: trivially wraps
// the global `bitset_edges` DynamicBitSet with no device mirroring.
// NOTE(review): lossy extract — struct terminator not visible here.
356 #define GALOIS_SYNC_STRUCTURE_BITSET_EDGES \
357 struct Bitset_edges { \
358 static constexpr bool is_vector_bitset() { return false; } \
360 static constexpr bool is_valid() { return true; } \
362 static galois::DynamicBitSet& get() { return bitset_edges; } \
364 static void reset_range(size_t begin, size_t end) { \
365 bitset_edges.reset(begin, end); \
377 #ifdef GALOIS_ENABLE_GPU
// Generates `struct Reduce_add_<fieldname>`: an "add" reduction over the
// node field `fieldname` (stored in NodeData). GPU-enabled variant — every
// hook branches on the global `personality` and forwards to the generated
// *_node_<fieldname>_cuda helpers; the CPU path uses galois::add/galois::set
// on `node.fieldname`. `reset` zeroes the field so mirror contributions can
// be re-accumulated; `setVal` broadcasts the reduced value back to mirrors.
// NOTE(review): lossy extract — the embedded original line numbers jump, so
// return statements and closing braces of several members are missing here.
379 #define GALOIS_SYNC_STRUCTURE_REDUCE_ADD(fieldname, fieldtype) \
380 struct Reduce_add_##fieldname { \
381 typedef fieldtype ValTy; \
383 static ValTy extract(uint32_t node_id, const struct NodeData& node) { \
384 if (personality == GPU_CUDA) \
385 return get_node_##fieldname##_cuda(cuda_ctx, node_id); \
386 assert(personality == CPU); \
387 return node.fieldname; \
390 static bool extract_batch(unsigned from_id, uint8_t* y, size_t* s, \
391 DataCommMode* data_mode) { \
392 if (personality == GPU_CUDA) { \
393 batch_get_node_##fieldname##_cuda(cuda_ctx, from_id, y, s, data_mode); \
396 assert(personality == CPU); \
400 static bool extract_batch(unsigned from_id, uint8_t* y) { \
401 if (personality == GPU_CUDA) { \
402 batch_get_node_##fieldname##_cuda(cuda_ctx, from_id, y); \
405 assert(personality == CPU); \
409 static bool extract_reset_batch(unsigned from_id, uint8_t* y, size_t* s, \
410 DataCommMode* data_mode) { \
411 if (personality == GPU_CUDA) { \
412 batch_get_reset_node_##fieldname##_cuda(cuda_ctx, from_id, y, s, \
413 data_mode, (ValTy)0); \
416 assert(personality == CPU); \
420 static bool extract_reset_batch(unsigned from_id, uint8_t* y) { \
421 if (personality == GPU_CUDA) { \
422 batch_get_reset_node_##fieldname##_cuda(cuda_ctx, from_id, y, \
426 assert(personality == CPU); \
430 static bool reset_batch(size_t begin, size_t end) { \
431 if (personality == GPU_CUDA) { \
432 batch_reset_node_##fieldname##_cuda(cuda_ctx, begin, end, (ValTy)0); \
435 assert(personality == CPU); \
439 static bool reduce(uint32_t node_id, struct NodeData& node, ValTy y) { \
440 if (personality == GPU_CUDA) { \
441 add_node_##fieldname##_cuda(cuda_ctx, node_id, y); \
444 assert(personality == CPU); \
446 galois::add(node.fieldname, y); \
451 static bool reduce_batch(unsigned from_id, uint8_t* y, \
452 DataCommMode data_mode) { \
453 if (personality == GPU_CUDA) { \
454 batch_add_node_##fieldname##_cuda(cuda_ctx, from_id, y, data_mode); \
457 assert(personality == CPU); \
461 static bool reduce_mirror_batch(unsigned from_id, uint8_t* y, \
462 DataCommMode data_mode) { \
463 if (personality == GPU_CUDA) { \
464 batch_add_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, \
468 assert(personality == CPU); \
472 static void reset(uint32_t node_id, struct NodeData& node) { \
473 if (personality == GPU_CUDA) { \
474 set_node_##fieldname##_cuda(cuda_ctx, node_id, (ValTy)0); \
475 } else if (personality == CPU) \
476 galois::set(node.fieldname, (ValTy)0); \
479 static void setVal(uint32_t node_id, struct NodeData& node, ValTy y) { \
480 if (personality == GPU_CUDA) \
481 set_node_##fieldname##_cuda(cuda_ctx, node_id, y); \
482 else if (personality == CPU) \
483 node.fieldname = y; \
486 static bool setVal_batch(unsigned from_id, uint8_t* y, \
487 DataCommMode data_mode) { \
488 if (personality == GPU_CUDA) { \
489 batch_set_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, \
493 assert(personality == CPU); \
// Host-only fallback of GALOIS_SYNC_STRUCTURE_REDUCE_ADD: same interface,
// CPU path only. The visible *_batch hooks report false (no batched comms);
// note `reset_batch` returns false here — reset must be done element-wise —
// whereas the "set" reduction variant returns true (no reset needed).
// NOTE(review): lossy extract — several member bodies are not visible here.
499 #define GALOIS_SYNC_STRUCTURE_REDUCE_ADD(fieldname, fieldtype) \
500 struct Reduce_add_##fieldname { \
501 typedef fieldtype ValTy; \
503 static ValTy extract(uint32_t, const struct NodeData& node) { \
504 return node.fieldname; \
507 static bool extract_batch(unsigned, uint8_t*, size_t*, DataCommMode*) { \
511 static bool extract_batch(unsigned, uint8_t*) { return false; } \
513 static bool extract_reset_batch(unsigned, uint8_t*, size_t*, \
518 static bool extract_reset_batch(unsigned, uint8_t*) { return false; } \
520 static bool reset_batch(size_t, size_t) { return false; } \
522 static bool reduce(uint32_t, struct NodeData& node, ValTy y) { \
524 galois::add(node.fieldname, y); \
529 static bool reduce_batch(unsigned, uint8_t*, DataCommMode) { \
533 static bool reduce_mirror_batch(unsigned, uint8_t*, DataCommMode) { \
537 static void reset(uint32_t, struct NodeData& node) { \
538 galois::set(node.fieldname, (ValTy)0); \
541 static void setVal(uint32_t, struct NodeData& node, ValTy y) { \
542 node.fieldname = y; \
545 static bool setVal_batch(unsigned, uint8_t*, DataCommMode) { \
555 #ifdef GALOIS_ENABLE_GPU
// ARRAY variant of the "add" reduction: identical to
// GALOIS_SYNC_STRUCTURE_REDUCE_ADD except the CPU-side value lives in a
// free-standing global array indexed by node id (`fieldname[node_id]`)
// rather than in a NodeData member — hence the NodeData& parameters are
// marked GALOIS_UNUSED. GPU-enabled variant with `personality` dispatch.
// NOTE(review): lossy extract — the embedded original line numbers jump, so
// return statements and closing braces of several members are missing here.
557 #define GALOIS_SYNC_STRUCTURE_REDUCE_ADD_ARRAY(fieldname, fieldtype) \
558 struct Reduce_add_##fieldname { \
559 typedef fieldtype ValTy; \
561 static ValTy extract(uint32_t node_id, const struct NodeData& node) { \
562 if (personality == GPU_CUDA) \
563 return get_node_##fieldname##_cuda(cuda_ctx, node_id); \
564 assert(personality == CPU); \
565 return fieldname[node_id]; \
568 static bool extract_batch(unsigned from_id, uint8_t* y, size_t* s, \
569 DataCommMode* data_mode) { \
570 if (personality == GPU_CUDA) { \
571 batch_get_node_##fieldname##_cuda(cuda_ctx, from_id, y, s, data_mode); \
574 assert(personality == CPU); \
578 static bool extract_batch(unsigned from_id, uint8_t* y) { \
579 if (personality == GPU_CUDA) { \
580 batch_get_node_##fieldname##_cuda(cuda_ctx, from_id, y); \
583 assert(personality == CPU); \
587 static bool extract_reset_batch(unsigned from_id, uint8_t* y, size_t* s, \
588 DataCommMode* data_mode) { \
589 if (personality == GPU_CUDA) { \
590 batch_get_reset_node_##fieldname##_cuda(cuda_ctx, from_id, y, s, \
591 data_mode, (ValTy)0); \
594 assert(personality == CPU); \
598 static bool extract_reset_batch(unsigned from_id, uint8_t* y) { \
599 if (personality == GPU_CUDA) { \
600 batch_get_reset_node_##fieldname##_cuda(cuda_ctx, from_id, y, \
604 assert(personality == CPU); \
608 static bool reset_batch(size_t begin, size_t end) { \
609 if (personality == GPU_CUDA) { \
610 batch_reset_node_##fieldname##_cuda(cuda_ctx, begin, end, (ValTy)0); \
613 assert(personality == CPU); \
617 static bool reduce(uint32_t node_id, struct NodeData& GALOIS_UNUSED(node), \
619 if (personality == GPU_CUDA) { \
620 add_node_##fieldname##_cuda(cuda_ctx, node_id, y); \
623 assert(personality == CPU); \
625 galois::add(fieldname[node_id], y); \
630 static bool reduce_batch(unsigned from_id, uint8_t* y, \
631 DataCommMode data_mode) { \
632 if (personality == GPU_CUDA) { \
633 batch_add_node_##fieldname##_cuda(cuda_ctx, from_id, y, data_mode); \
636 assert(personality == CPU); \
640 static bool reduce_mirror_batch(unsigned from_id, uint8_t* y, \
641 DataCommMode data_mode) { \
642 if (personality == GPU_CUDA) { \
643 batch_add_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, \
647 assert(personality == CPU); \
651 static void reset(uint32_t node_id, \
652 struct NodeData& GALOIS_UNUSED(node)) { \
653 if (personality == GPU_CUDA) { \
654 set_node_##fieldname##_cuda(cuda_ctx, node_id, (ValTy)0); \
655 } else if (personality == CPU) \
656 galois::set(fieldname[node_id], (ValTy)0); \
659 static void setVal(uint32_t node_id, struct NodeData& GALOIS_UNUSED(node), \
661 if (personality == GPU_CUDA) \
662 set_node_##fieldname##_cuda(cuda_ctx, node_id, y); \
663 else if (personality == CPU) \
664 fieldname[node_id] = y; \
667 static bool setVal_batch(unsigned from_id, uint8_t* y, \
668 DataCommMode data_mode) { \
669 if (personality == GPU_CUDA) { \
670 batch_set_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, \
674 assert(personality == CPU); \
// Host-only fallback of GALOIS_SYNC_STRUCTURE_REDUCE_ADD_ARRAY: "add"
// reduction on a free-standing array indexed by node id; NodeData& params
// are unused. The visible *_batch hooks report that batching is unavailable.
// NOTE(review): lossy extract — several member bodies are not visible here.
680 #define GALOIS_SYNC_STRUCTURE_REDUCE_ADD_ARRAY(fieldname, fieldtype) \
681 struct Reduce_add_##fieldname { \
682 typedef fieldtype ValTy; \
684 static ValTy extract(uint32_t node_id, const struct NodeData& node) { \
685 return fieldname[node_id]; \
688 static bool extract_batch(unsigned from_id, uint8_t* y, size_t* s, \
689 DataCommMode* data_mode) { \
693 static bool extract_batch(unsigned from_id, uint8_t* y) { return false; } \
695 static bool extract_reset_batch(unsigned from_id, uint8_t* y, size_t* s, \
696 DataCommMode* data_mode) { \
700 static bool extract_reset_batch(unsigned from_id, uint8_t* y) { \
704 static bool reset_batch(size_t GALOIS_UNUSED(begin), \
705 size_t GALOIS_UNUSED(end)) { \
709 static bool reduce(uint32_t node_id, struct NodeData& GALOIS_UNUSED(node), \
712 galois::add(fieldname[node_id], y); \
717 static bool reduce_batch(unsigned from_id, uint8_t* y, \
718 DataCommMode data_mode) { \
722 static bool reduce_mirror_batch(unsigned from_id, uint8_t* y, \
723 DataCommMode data_mode) { \
727 static void reset(uint32_t node_id, \
728 struct NodeData& GALOIS_UNUSED(node)) { \
729 galois::set(fieldname[node_id], (ValTy)0); \
732 static void setVal(uint32_t node_id, struct NodeData& GALOIS_UNUSED(node), \
734 fieldname[node_id] = y; \
737 static bool setVal_batch(unsigned from_id, uint8_t* y, \
738 DataCommMode data_mode) { \
751 #ifdef GALOIS_ENABLE_GPU
// Generates `struct Reduce_set_<fieldname>`: a "set" (overwrite) reduction
// on the NodeData member `fieldname`. GPU-enabled variant with `personality`
// dispatch. Unlike the "add" variant, extract_reset_* use the *mirror* get
// helpers and `reset` is a no-op (overwrite semantics need no accumulator
// reset — both params are GALOIS_UNUSED).
// NOTE(review): lossy extract — the embedded original line numbers jump, so
// return statements and closing braces of several members are missing here.
753 #define GALOIS_SYNC_STRUCTURE_REDUCE_SET(fieldname, fieldtype) \
754 struct Reduce_set_##fieldname { \
755 typedef fieldtype ValTy; \
757 static ValTy extract(uint32_t node_id, const struct NodeData& node) { \
758 if (personality == GPU_CUDA) \
759 return get_node_##fieldname##_cuda(cuda_ctx, node_id); \
760 assert(personality == CPU); \
761 return node.fieldname; \
764 static bool extract_batch(unsigned from_id, uint8_t* y, size_t* s, \
765 DataCommMode* data_mode) { \
766 if (personality == GPU_CUDA) { \
767 batch_get_node_##fieldname##_cuda(cuda_ctx, from_id, y, s, data_mode); \
770 assert(personality == CPU); \
774 static bool extract_batch(unsigned from_id, uint8_t* y) { \
775 if (personality == GPU_CUDA) { \
776 batch_get_node_##fieldname##_cuda(cuda_ctx, from_id, y); \
779 assert(personality == CPU); \
783 static bool extract_reset_batch(unsigned from_id, uint8_t* y, size_t* s, \
784 DataCommMode* data_mode) { \
785 if (personality == GPU_CUDA) { \
786 batch_get_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, s, \
790 assert(personality == CPU); \
794 static bool extract_reset_batch(unsigned from_id, uint8_t* y) { \
795 if (personality == GPU_CUDA) { \
796 batch_get_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y); \
799 assert(personality == CPU); \
803 static bool reset_batch(size_t GALOIS_UNUSED(begin), \
804 size_t GALOIS_UNUSED(end)) { \
808 static bool reduce(uint32_t node_id, struct NodeData& node, ValTy y) { \
809 if (personality == GPU_CUDA) { \
810 set_node_##fieldname##_cuda(cuda_ctx, node_id, y); \
813 assert(personality == CPU); \
815 galois::set(node.fieldname, y); \
820 static bool reduce_batch(unsigned from_id, uint8_t* y, \
821 DataCommMode data_mode) { \
822 if (personality == GPU_CUDA) { \
823 batch_set_node_##fieldname##_cuda(cuda_ctx, from_id, y, data_mode); \
826 assert(personality == CPU); \
830 static bool reduce_mirror_batch(unsigned from_id, uint8_t* y, \
831 DataCommMode data_mode) { \
832 if (personality == GPU_CUDA) { \
833 batch_set_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, \
837 assert(personality == CPU); \
841 static void reset(uint32_t GALOIS_UNUSED(node_id), \
842 struct NodeData& GALOIS_UNUSED(node)) {} \
844 static void setVal(uint32_t node_id, struct NodeData& node, ValTy y) { \
845 if (personality == GPU_CUDA) \
846 set_node_##fieldname##_cuda(cuda_ctx, node_id, y); \
847 else if (personality == CPU) \
848 node.fieldname = y; \
851 static bool setVal_batch(unsigned from_id, uint8_t* y, \
852 DataCommMode data_mode) { \
853 if (personality == GPU_CUDA) { \
854 batch_set_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, \
858 assert(personality == CPU); \
// Host-only fallback of GALOIS_SYNC_STRUCTURE_REDUCE_SET: overwrite
// reduction on `node.fieldname`. `reset` is a no-op and `reset_batch`
// returns true — presumably because a "set" reduction needs no reset, so
// the batched reset is trivially satisfied (contrast REDUCE_ADD's false).
// NOTE(review): lossy extract — several member bodies are not visible here.
864 #define GALOIS_SYNC_STRUCTURE_REDUCE_SET(fieldname, fieldtype) \
865 struct Reduce_set_##fieldname { \
866 typedef fieldtype ValTy; \
868 static ValTy extract(uint32_t, const struct NodeData& node) { \
869 return node.fieldname; \
872 static bool extract_batch(unsigned, uint8_t*, size_t*, DataCommMode*) { \
876 static bool extract_batch(unsigned, uint8_t*) { return false; } \
878 static bool extract_reset_batch(unsigned, uint8_t*, size_t*, \
883 static bool extract_reset_batch(unsigned, uint8_t*) { return false; } \
885 static bool reset_batch(size_t, size_t) { return true; } \
887 static bool reduce(uint32_t, struct NodeData& node, ValTy y) { \
889 galois::set(node.fieldname, y); \
894 static bool reduce_batch(unsigned, uint8_t*, DataCommMode) { \
898 static bool reduce_mirror_batch(unsigned, uint8_t*, DataCommMode) { \
902 static void reset(uint32_t, struct NodeData&) {} \
904 static void setVal(uint32_t, struct NodeData& node, ValTy y) { \
905 node.fieldname = y; \
908 static bool setVal_batch(unsigned, uint8_t*, DataCommMode) { \
918 #ifdef GALOIS_ENABLE_GPU
// ARRAY variant of the "set" reduction: the CPU-side value lives in a
// free-standing global array (`fieldname[node_id]`) instead of a NodeData
// member, so the NodeData& parameters are GALOIS_UNUSED. GPU-enabled
// variant with `personality` dispatch; `reset` is a no-op (overwrite
// semantics need no accumulator reset).
// NOTE(review): lossy extract — the embedded original line numbers jump, so
// return statements and closing braces of several members are missing here.
920 #define GALOIS_SYNC_STRUCTURE_REDUCE_SET_ARRAY(fieldname, fieldtype) \
921 struct Reduce_set_##fieldname { \
922 typedef fieldtype ValTy; \
924 static ValTy extract(uint32_t node_id, const struct NodeData& node) { \
925 if (personality == GPU_CUDA) \
926 return get_node_##fieldname##_cuda(cuda_ctx, node_id); \
927 assert(personality == CPU); \
928 return fieldname[node_id]; \
931 static bool extract_batch(unsigned from_id, uint8_t* y, size_t* s, \
932 DataCommMode* data_mode) { \
933 if (personality == GPU_CUDA) { \
934 batch_get_node_##fieldname##_cuda(cuda_ctx, from_id, y, s, data_mode); \
937 assert(personality == CPU); \
941 static bool extract_batch(unsigned from_id, uint8_t* y) { \
942 if (personality == GPU_CUDA) { \
943 batch_get_node_##fieldname##_cuda(cuda_ctx, from_id, y); \
946 assert(personality == CPU); \
950 static bool extract_reset_batch(unsigned from_id, uint8_t* y, size_t* s, \
951 DataCommMode* data_mode) { \
952 if (personality == GPU_CUDA) { \
953 batch_get_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, s, \
957 assert(personality == CPU); \
961 static bool extract_reset_batch(unsigned from_id, uint8_t* y) { \
962 if (personality == GPU_CUDA) { \
963 batch_get_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y); \
966 assert(personality == CPU); \
970 static bool reset_batch(size_t GALOIS_UNUSED(begin), \
971 size_t GALOIS_UNUSED(end)) { \
975 static bool reduce(uint32_t node_id, struct NodeData& GALOIS_UNUSED(node), \
977 if (personality == GPU_CUDA) { \
978 set_node_##fieldname##_cuda(cuda_ctx, node_id, y); \
981 assert(personality == CPU); \
983 galois::set(fieldname[node_id], y); \
988 static bool reduce_batch(unsigned from_id, uint8_t* y, \
989 DataCommMode data_mode) { \
990 if (personality == GPU_CUDA) { \
991 batch_set_node_##fieldname##_cuda(cuda_ctx, from_id, y, data_mode); \
994 assert(personality == CPU); \
998 static bool reduce_mirror_batch(unsigned from_id, uint8_t* y, \
999 DataCommMode data_mode) { \
1000 if (personality == GPU_CUDA) { \
1001 batch_set_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, \
1005 assert(personality == CPU); \
1009 static void reset(uint32_t GALOIS_UNUSED(node_id), \
1010 struct NodeData& GALOIS_UNUSED(node)) {} \
1012 static void setVal(uint32_t node_id, struct NodeData& GALOIS_UNUSED(node), \
1014 if (personality == GPU_CUDA) \
1015 set_node_##fieldname##_cuda(cuda_ctx, node_id, y); \
1016 else if (personality == CPU) \
1017 fieldname[node_id] = y; \
1020 static bool setVal_batch(unsigned from_id, uint8_t* y, \
1021 DataCommMode data_mode) { \
1022 if (personality == GPU_CUDA) { \
1023 batch_set_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, \
1027 assert(personality == CPU); \
// Host-only fallback of GALOIS_SYNC_STRUCTURE_REDUCE_SET_ARRAY: overwrite
// reduction on a free-standing array indexed by node id; NodeData& params
// are unused and `reset` is a no-op.
// NOTE(review): lossy extract — several member bodies are not visible here.
1033 #define GALOIS_SYNC_STRUCTURE_REDUCE_SET_ARRAY(fieldname, fieldtype) \
1034 struct Reduce_set_##fieldname { \
1035 typedef fieldtype ValTy; \
1037 static ValTy extract(uint32_t node_id, const struct NodeData& node) { \
1038 return fieldname[node_id]; \
1041 static bool extract_batch(unsigned from_id, uint8_t* y, size_t* s, \
1042 DataCommMode* data_mode) { \
1046 static bool extract_batch(unsigned from_id, uint8_t* y) { return false; } \
1048 static bool extract_reset_batch(unsigned from_id, uint8_t* y, size_t* s, \
1049 DataCommMode* data_mode) { \
1053 static bool extract_reset_batch(unsigned from_id, uint8_t* y) { \
1057 static bool reset_batch(size_t GALOIS_UNUSED(begin), \
1058 size_t GALOIS_UNUSED(end)) { \
1062 static bool reduce(uint32_t node_id, struct NodeData& GALOIS_UNUSED(node), \
1065 galois::set(fieldname[node_id], y); \
1070 static bool reduce_batch(unsigned from_id, uint8_t* y, \
1071 DataCommMode data_mode) { \
1075 static bool reduce_mirror_batch(unsigned from_id, uint8_t* y, \
1076 DataCommMode data_mode) { \
1080 static void reset(uint32_t GALOIS_UNUSED(node_id), \
1081 struct NodeData& GALOIS_UNUSED(node)) {} \
1083 static void setVal(uint32_t node_id, struct NodeData& GALOIS_UNUSED(node), \
1085 fieldname[node_id] = y; \
1088 static bool setVal_batch(unsigned from_id, uint8_t* y, \
1089 DataCommMode data_mode) { \
1102 #ifdef GALOIS_ENABLE_GPU
// Generates `struct Reduce_min_<fieldname>`: a "min" reduction on the
// NodeData member `fieldname`. GPU-enabled variant with `personality`
// dispatch. `reduce` returns whether the incoming value y actually lowered
// the stored value (`y < old`, where min_node_*/galois::min return the
// previous value); `reset` is a no-op since min needs no accumulator reset.
// NOTE(review): lossy extract — the embedded original line numbers jump, so
// return statements and closing braces of several members are missing here.
1104 #define GALOIS_SYNC_STRUCTURE_REDUCE_MIN(fieldname, fieldtype) \
1105 struct Reduce_min_##fieldname { \
1106 typedef fieldtype ValTy; \
1108 static ValTy extract(uint32_t node_id, const struct NodeData& node) { \
1109 if (personality == GPU_CUDA) \
1110 return get_node_##fieldname##_cuda(cuda_ctx, node_id); \
1111 assert(personality == CPU); \
1112 return node.fieldname; \
1115 static bool extract_batch(unsigned from_id, uint8_t* y, size_t* s, \
1116 DataCommMode* data_mode) { \
1117 if (personality == GPU_CUDA) { \
1118 batch_get_node_##fieldname##_cuda(cuda_ctx, from_id, y, s, data_mode); \
1121 assert(personality == CPU); \
1125 static bool extract_batch(unsigned from_id, uint8_t* y) { \
1126 if (personality == GPU_CUDA) { \
1127 batch_get_node_##fieldname##_cuda(cuda_ctx, from_id, y); \
1130 assert(personality == CPU); \
1134 static bool extract_reset_batch(unsigned from_id, uint8_t* y, size_t* s, \
1135 DataCommMode* data_mode) { \
1136 if (personality == GPU_CUDA) { \
1137 batch_get_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, s, \
1141 assert(personality == CPU); \
1145 static bool extract_reset_batch(unsigned from_id, uint8_t* y) { \
1146 if (personality == GPU_CUDA) { \
1147 batch_get_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y); \
1150 assert(personality == CPU); \
1154 static bool reset_batch(size_t GALOIS_UNUSED(begin), \
1155 size_t GALOIS_UNUSED(end)) { \
1159 static bool reduce(uint32_t node_id, struct NodeData& node, ValTy y) { \
1160 if (personality == GPU_CUDA) { \
1161 return y < min_node_##fieldname##_cuda(cuda_ctx, node_id, y); \
1163 assert(personality == CPU); \
1164 { return y < galois::min(node.fieldname, y); } \
1167 static bool reduce_batch(unsigned from_id, uint8_t* y, \
1168 DataCommMode data_mode) { \
1169 if (personality == GPU_CUDA) { \
1170 batch_min_node_##fieldname##_cuda(cuda_ctx, from_id, y, data_mode); \
1173 assert(personality == CPU); \
1177 static bool reduce_mirror_batch(unsigned from_id, uint8_t* y, \
1178 DataCommMode data_mode) { \
1179 if (personality == GPU_CUDA) { \
1180 batch_min_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, \
1184 assert(personality == CPU); \
1188 static void reset(uint32_t GALOIS_UNUSED(node_id), \
1189 struct NodeData& GALOIS_UNUSED(node)) {} \
1191 static void setVal(uint32_t node_id, struct NodeData& node, ValTy y) { \
1192 if (personality == GPU_CUDA) \
1193 set_node_##fieldname##_cuda(cuda_ctx, node_id, y); \
1194 else if (personality == CPU) \
1195 node.fieldname = y; \
1198 static bool setVal_batch(unsigned from_id, uint8_t* y, \
1199 DataCommMode data_mode) { \
1200 if (personality == GPU_CUDA) { \
1201 batch_set_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, \
1205 assert(personality == CPU); \
// Host-only fallback of GALOIS_SYNC_STRUCTURE_REDUCE_MIN: `reduce` returns
// true iff y lowered the stored value (galois::min returns the previous
// value). `reset` is a no-op and `reset_batch` reports trivial success,
// since a min reduction needs no accumulator reset.
// NOTE(review): lossy extract — several member bodies are not visible here.
1211 #define GALOIS_SYNC_STRUCTURE_REDUCE_MIN(fieldname, fieldtype) \
1212 struct Reduce_min_##fieldname { \
1213 typedef fieldtype ValTy; \
1215 static ValTy extract(uint32_t, const struct NodeData& node) { \
1216 return node.fieldname; \
1219 static bool extract_batch(unsigned, uint8_t*, size_t*, DataCommMode*) { \
1223 static bool extract_batch(unsigned, uint8_t*) { return false; } \
1225 static bool extract_reset_batch(unsigned, uint8_t*, size_t*, \
1230 static bool extract_reset_batch(unsigned, uint8_t*) { return false; } \
1232 static bool reset_batch(size_t, size_t) { return true; } \
1234 static bool reduce(uint32_t, struct NodeData& node, ValTy y) { \
1235 { return y < galois::min(node.fieldname, y); } \
1238 static bool reduce_batch(unsigned, uint8_t*, DataCommMode) { \
1242 static bool reduce_mirror_batch(unsigned, uint8_t*, DataCommMode) { \
1246 static void reset(uint32_t, struct NodeData&) {} \
1248 static void setVal(uint32_t, struct NodeData& node, ValTy y) { \
1249 node.fieldname = y; \
1252 static bool setVal_batch(unsigned, uint8_t*, DataCommMode) { \
1265 #ifdef GALOIS_ENABLE_GPU
// Generates `struct Reduce_max_<fieldname>`: a "max" reduction on the
// NodeData member `fieldname` — mirror image of REDUCE_MIN (`y > old`
// signals that y raised the stored value). GPU-enabled variant with
// `personality` dispatch; `reset` is a no-op.
// NOTE(review): lossy extract — the embedded original line numbers jump, so
// return statements and closing braces of several members are missing here.
1267 #define GALOIS_SYNC_STRUCTURE_REDUCE_MAX(fieldname, fieldtype) \
1268 struct Reduce_max_##fieldname { \
1269 typedef fieldtype ValTy; \
1271 static ValTy extract(uint32_t node_id, const struct NodeData& node) { \
1272 if (personality == GPU_CUDA) \
1273 return get_node_##fieldname##_cuda(cuda_ctx, node_id); \
1274 assert(personality == CPU); \
1275 return node.fieldname; \
1278 static bool extract_batch(unsigned from_id, uint8_t* y, size_t* s, \
1279 DataCommMode* data_mode) { \
1280 if (personality == GPU_CUDA) { \
1281 batch_get_node_##fieldname##_cuda(cuda_ctx, from_id, y, s, data_mode); \
1284 assert(personality == CPU); \
1288 static bool extract_batch(unsigned from_id, uint8_t* y) { \
1289 if (personality == GPU_CUDA) { \
1290 batch_get_node_##fieldname##_cuda(cuda_ctx, from_id, y); \
1293 assert(personality == CPU); \
1297 static bool extract_reset_batch(unsigned from_id, uint8_t* y, size_t* s, \
1298 DataCommMode* data_mode) { \
1299 if (personality == GPU_CUDA) { \
1300 batch_get_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, s, \
1304 assert(personality == CPU); \
1308 static bool extract_reset_batch(unsigned from_id, uint8_t* y) { \
1309 if (personality == GPU_CUDA) { \
1310 batch_get_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y); \
1313 assert(personality == CPU); \
1317 static bool reset_batch(size_t GALOIS_UNUSED(begin), \
1318 size_t GALOIS_UNUSED(end)) { \
1322 static bool reduce(uint32_t node_id, struct NodeData& node, ValTy y) { \
1323 if (personality == GPU_CUDA) { \
1324 return y > max_node_##fieldname##_cuda(cuda_ctx, node_id, y); \
1326 assert(personality == CPU); \
1327 { return y > galois::max(node.fieldname, y); } \
1330 static bool reduce_batch(unsigned from_id, uint8_t* y, \
1331 DataCommMode data_mode) { \
1332 if (personality == GPU_CUDA) { \
1333 batch_max_node_##fieldname##_cuda(cuda_ctx, from_id, y, data_mode); \
1336 assert(personality == CPU); \
1340 static bool reduce_mirror_batch(unsigned from_id, uint8_t* y, \
1341 DataCommMode data_mode) { \
1342 if (personality == GPU_CUDA) { \
1343 batch_max_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, \
1347 assert(personality == CPU); \
1351 static void reset(uint32_t GALOIS_UNUSED(node_id), \
1352 struct NodeData& GALOIS_UNUSED(node)) {} \
1354 static void setVal(uint32_t node_id, struct NodeData& node, ValTy y) { \
1355 if (personality == GPU_CUDA) \
1356 set_node_##fieldname##_cuda(cuda_ctx, node_id, y); \
1357 else if (personality == CPU) \
1358 node.fieldname = y; \
1361 static bool setVal_batch(unsigned from_id, uint8_t* y, \
1362 DataCommMode data_mode) { \
1363 if (personality == GPU_CUDA) { \
1364 batch_set_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, \
1368 assert(personality == CPU); \
// Host-only fallback of GALOIS_SYNC_STRUCTURE_REDUCE_MAX: `reduce` returns
// true iff y raised the stored value (galois::max returns the previous
// value); `reset` is a no-op.
// NOTE(review): lossy extract — several member bodies are not visible here.
1374 #define GALOIS_SYNC_STRUCTURE_REDUCE_MAX(fieldname, fieldtype) \
1375 struct Reduce_max_##fieldname { \
1376 typedef fieldtype ValTy; \
1378 static ValTy extract(uint32_t node_id, const struct NodeData& node) { \
1379 return node.fieldname; \
1382 static bool extract_batch(unsigned from_id, uint8_t* y, size_t* s, \
1383 DataCommMode* data_mode) { \
1387 static bool extract_batch(unsigned from_id, uint8_t* y) { return false; } \
1389 static bool extract_reset_batch(unsigned from_id, uint8_t* y, size_t* s, \
1390 DataCommMode* data_mode) { \
1394 static bool extract_reset_batch(unsigned from_id, uint8_t* y) { \
1398 static bool reset_batch(size_t GALOIS_UNUSED(begin), \
1399 size_t GALOIS_UNUSED(end)) { \
1403 static bool reduce(uint32_t GALOIS_UNUSED(node_id), struct NodeData& node, \
1405 { return y > galois::max(node.fieldname, y); } \
1408 static bool reduce_batch(unsigned from_id, uint8_t* y, \
1409 DataCommMode data_mode) { \
1413 static bool reduce_mirror_batch(unsigned from_id, uint8_t* y, \
1414 DataCommMode data_mode) { \
1418 static void reset(uint32_t GALOIS_UNUSED(node_id), \
1419 struct NodeData& GALOIS_UNUSED(node)) {} \
1421 static void setVal(uint32_t GALOIS_UNUSED(node_id), struct NodeData& node, \
1423 node.fieldname = y; \
1426 static bool setVal_batch(unsigned from_id, uint8_t* y, \
1427 DataCommMode data_mode) { \
1437 #ifdef GALOIS_ENABLE_GPU
// ARRAY variant of the "min" reduction: the CPU-side value lives in a
// free-standing global array (`fieldname[node_id]`) instead of a NodeData
// member, so the NodeData& parameters are GALOIS_UNUSED. GPU-enabled
// variant with `personality` dispatch; `reduce` reports whether y lowered
// the stored value, and `reset` is a no-op.
// NOTE(review): lossy extract — the embedded original line numbers jump, so
// return statements and closing braces of several members are missing here.
1439 #define GALOIS_SYNC_STRUCTURE_REDUCE_MIN_ARRAY(fieldname, fieldtype) \
1440 struct Reduce_min_##fieldname { \
1441 typedef fieldtype ValTy; \
1443 static ValTy extract(uint32_t node_id, const struct NodeData& node) { \
1444 if (personality == GPU_CUDA) \
1445 return get_node_##fieldname##_cuda(cuda_ctx, node_id); \
1446 assert(personality == CPU); \
1447 return fieldname[node_id]; \
1450 static bool extract_batch(unsigned from_id, uint8_t* y, size_t* s, \
1451 DataCommMode* data_mode) { \
1452 if (personality == GPU_CUDA) { \
1453 batch_get_node_##fieldname##_cuda(cuda_ctx, from_id, y, s, data_mode); \
1456 assert(personality == CPU); \
1460 static bool extract_batch(unsigned from_id, uint8_t* y) { \
1461 if (personality == GPU_CUDA) { \
1462 batch_get_node_##fieldname##_cuda(cuda_ctx, from_id, y); \
1465 assert(personality == CPU); \
1469 static bool extract_reset_batch(unsigned from_id, uint8_t* y, size_t* s, \
1470 DataCommMode* data_mode) { \
1471 if (personality == GPU_CUDA) { \
1472 batch_get_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, s, \
1476 assert(personality == CPU); \
1480 static bool extract_reset_batch(unsigned from_id, uint8_t* y) { \
1481 if (personality == GPU_CUDA) { \
1482 batch_get_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y); \
1485 assert(personality == CPU); \
1489 static bool reset_batch(size_t GALOIS_UNUSED(begin), \
1490 size_t GALOIS_UNUSED(end)) { \
1494 static bool reduce(uint32_t node_id, struct NodeData& GALOIS_UNUSED(node), \
1496 if (personality == GPU_CUDA) { \
1497 return y < min_node_##fieldname##_cuda(cuda_ctx, node_id, y); \
1499 assert(personality == CPU); \
1500 { return y < galois::min(fieldname[node_id], y); } \
1503 static bool reduce_batch(unsigned from_id, uint8_t* y, \
1504 DataCommMode data_mode) { \
1505 if (personality == GPU_CUDA) { \
1506 batch_min_node_##fieldname##_cuda(cuda_ctx, from_id, y, data_mode); \
1509 assert(personality == CPU); \
1513 static bool reduce_mirror_batch(unsigned from_id, uint8_t* y, \
1514 DataCommMode data_mode) { \
1515 if (personality == GPU_CUDA) { \
1516 batch_min_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, \
1520 assert(personality == CPU); \
1524 static void reset(uint32_t GALOIS_UNUSED(node_id), \
1525 struct NodeData& GALOIS_UNUSED(node)) {} \
1527 static void setVal(uint32_t node_id, struct NodeData& GALOIS_UNUSED(node), \
1529 if (personality == GPU_CUDA) \
1530 set_node_##fieldname##_cuda(cuda_ctx, node_id, y); \
1531 else if (personality == CPU) \
1532 fieldname[node_id] = y; \
1535 static bool setVal_batch(unsigned from_id, uint8_t* y, \
1536 DataCommMode data_mode) { \
1537 if (personality == GPU_CUDA) { \
1538 batch_set_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, \
1542 assert(personality == CPU); \
// Host-only fallback of GALOIS_SYNC_STRUCTURE_REDUCE_MIN_ARRAY: "min"
// reduction on a free-standing array indexed by node id; NodeData& params
// are unused, `reduce` reports whether y lowered the stored value, and
// `reset` is a no-op.
// NOTE(review): lossy extract — several member bodies are not visible here.
1548 #define GALOIS_SYNC_STRUCTURE_REDUCE_MIN_ARRAY(fieldname, fieldtype) \
1549 struct Reduce_min_##fieldname { \
1550 typedef fieldtype ValTy; \
1552 static ValTy extract(uint32_t node_id, const struct NodeData& node) { \
1553 return fieldname[node_id]; \
1556 static bool extract_batch(unsigned from_id, uint8_t* y, size_t* s, \
1557 DataCommMode* data_mode) { \
1561 static bool extract_batch(unsigned from_id, uint8_t* y) { return false; } \
1563 static bool extract_reset_batch(unsigned from_id, uint8_t* y, size_t* s, \
1564 DataCommMode* data_mode) { \
1568 static bool extract_reset_batch(unsigned from_id, uint8_t* y) { \
1572 static bool reset_batch(size_t GALOIS_UNUSED(begin), \
1573 size_t GALOIS_UNUSED(end)) { \
1577 static bool reduce(uint32_t node_id, struct NodeData& GALOIS_UNUSED(node), \
1579 { return y < galois::min(fieldname[node_id], y); } \
1582 static bool reduce_batch(unsigned from_id, uint8_t* y, \
1583 DataCommMode data_mode) { \
1587 static bool reduce_mirror_batch(unsigned from_id, uint8_t* y, \
1588 DataCommMode data_mode) { \
1592 static void reset(uint32_t GALOIS_UNUSED(node_id), \
1593 struct NodeData& GALOIS_UNUSED(node)) {} \
1595 static void setVal(uint32_t node_id, struct NodeData& GALOIS_UNUSED(node), \
1597 fieldname[node_id] = y; \
1600 static bool setVal_batch(unsigned from_id, uint8_t* y, \
1601 DataCommMode data_mode) { \
1611 #ifdef GALOIS_ENABLE_GPU
1613 #define GALOIS_SYNC_STRUCTURE_REDUCE_PAIR_WISE_AVG_ARRAY(fieldname, fieldtype) \
1614 struct Reduce_pair_wise_avg_array_##fieldname { \
1615 typedef fieldtype ValTy; \
1617 static ValTy extract(uint32_t node_id, const struct NodeData& node) { \
1618 if (personality == GPU_CUDA) \
1619 return get_node_##fieldname##_cuda(cuda_ctx, node_id); \
1620 assert(personality == CPU); \
1621 return node.fieldname; \
1624 static bool extract_batch(unsigned from_id, uint8_t* y, size_t* s, \
1625 DataCommMode* data_mode) { \
1626 if (personality == GPU_CUDA) { \
1627 batch_get_node_##fieldname##_cuda(cuda_ctx, from_id, y, s, data_mode); \
1630 assert(personality == CPU); \
1634 static bool extract_batch(unsigned from_id, uint8_t* y) { \
1635 if (personality == GPU_CUDA) { \
1636 batch_get_node_##fieldname##_cuda(cuda_ctx, from_id, y); \
1639 assert(personality == CPU); \
1643 static bool extract_reset_batch(unsigned from_id, uint8_t* y, size_t* s, \
1644 DataCommMode* data_mode) { \
1645 if (personality == GPU_CUDA) { \
1646 batch_get_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, s, \
1650 assert(personality == CPU); \
1654 static bool extract_reset_batch(unsigned from_id, uint8_t* y) { \
1655 if (personality == GPU_CUDA) { \
1656 batch_get_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y); \
1659 assert(personality == CPU); \
1663 static bool reset_batch(size_t GALOIS_UNUSED(begin), \
1664 size_t GALOIS_UNUSED(end)) { \
1668 static bool reduce(uint32_t node_id, struct NodeData& node, ValTy y) { \
1669 if (personality == GPU_CUDA) { \
1670 set_node_##fieldname##_cuda(cuda_ctx, node_id, y); \
1673 assert(personality == CPU); \
1675 galois::pairWiseAvg_vec(node.fieldname, y); \
1680 static bool reduce_batch(unsigned from_id, uint8_t* y, \
1681 DataCommMode data_mode) { \
1682 if (personality == GPU_CUDA) { \
1683 batch_set_node_##fieldname##_cuda(cuda_ctx, from_id, y, data_mode); \
1686 assert(personality == CPU); \
1690 static bool reduce_mirror_batch(unsigned from_id, uint8_t* y, \
1691 DataCommMode data_mode) { \
1692 if (personality == GPU_CUDA) { \
1693 batch_set_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, \
1697 assert(personality == CPU); \
1701 static void reset(uint32_t GALOIS_UNUSED(node_id), \
1702 struct NodeData& node) { \
1703 { galois::resetVec(node.fieldname); } \
1706 static void setVal(uint32_t node_id, struct NodeData& node, ValTy y) { \
1707 if (personality == GPU_CUDA) \
1708 set_node_##fieldname##_cuda(cuda_ctx, node_id, y); \
1709 else if (personality == CPU) \
1710 node.fieldname = y; \
1713 static bool setVal_batch(unsigned from_id, uint8_t* y, \
1714 DataCommMode data_mode) { \
1715 if (personality == GPU_CUDA) { \
1716 batch_set_mirror_node_##fieldname##_cuda(cuda_ctx, from_id, y, \
1720 assert(personality == CPU); \
1726 #define GALOIS_SYNC_STRUCTURE_REDUCE_PAIR_WISE_AVG_ARRAY(fieldname, fieldtype) \
1727 struct Reduce_pair_wise_avg_array_##fieldname { \
1728 typedef fieldtype ValTy; \
1730 static ValTy extract(uint32_t, const struct NodeData& node) { \
1731 return node.fieldname; \
1734 static bool extract_batch(unsigned, uint8_t*, size_t*, DataCommMode*) { \
1738 static bool extract_batch(unsigned, uint8_t*) { return false; } \
1740 static bool extract_reset_batch(unsigned, uint8_t*, size_t*, \
1745 static bool extract_reset_batch(unsigned, uint8_t*) { return false; } \
1747 static bool reset_batch(size_t, size_t) { return false; } \
1749 static bool reduce(uint32_t, struct NodeData& node, ValTy y) { \
1751 galois::pairWiseAvg_vec(node.fieldname, y); \
1756 static bool reduce_batch(unsigned, uint8_t*, DataCommMode) { \
1760 static bool reduce_mirror_batch(unsigned, uint8_t*, DataCommMode) { \
1764 static void reset(uint32_t, struct NodeData& node) { \
1765 { galois::resetVec(node.fieldname); } \
1768 static void setVal(uint32_t, struct NodeData& node, ValTy y) { \
1769 node.fieldname = y; \
1772 static bool setVal_batch(unsigned, uint8_t*, DataCommMode) { \
1782 #define GALOIS_SYNC_STRUCTURE_REDUCE_PAIR_WISE_ADD_ARRAY(fieldname, fieldtype) \
1783 struct Reduce_pair_wise_add_array_##fieldname { \
1784 typedef fieldtype ValTy; \
1786 static ValTy extract(uint32_t, const struct NodeData& node) { \
1787 return node.fieldname; \
1790 static bool extract_batch(unsigned, uint8_t*, size_t*, DataCommMode*) { \
1794 static bool extract_batch(unsigned, uint8_t*) { return false; } \
1796 static bool extract_reset_batch(unsigned, uint8_t*, size_t*, \
1801 static bool extract_reset_batch(unsigned, uint8_t*) { return false; } \
1803 static bool reset_batch(size_t, size_t) { return false; } \
1805 static bool reduce(uint32_t, struct NodeData& node, ValTy y) { \
1807 galois::addArray(node.fieldname, y); \
1812 static bool reduce_batch(unsigned, uint8_t*, DataCommMode) { \
1816 static bool reduce_mirror_batch(unsigned, uint8_t*, DataCommMode) { \
1820 static void reset(uint32_t, struct NodeData& node) { \
1821 { galois::resetVec(node.fieldname); } \
1824 static void setVal(uint32_t, struct NodeData& node, ValTy y) { \
1825 node.fieldname = y; \
1828 static bool setVal_batch(unsigned, uint8_t*, DataCommMode) { \
1837 #define GALOIS_SYNC_STRUCTURE_REDUCE_PAIR_WISE_ADD_ARRAY_SINGLE(fieldname, \
1839 struct Reduce_pair_wise_add_array_single_##fieldname { \
1840 typedef fieldtype ValTy; \
1842 static ValTy extract(uint32_t node_id, const struct NodeData& node, \
1843 unsigned vecIndex) { \
1844 return node.fieldname[vecIndex]; \
1847 static bool extract_batch(unsigned from_id, uint8_t* y, size_t* s, \
1848 DataCommMode* data_mode) { \
1852 static bool extract_batch(unsigned from_id, uint8_t* y) { return false; } \
1854 static bool extract_reset_batch(unsigned, uint8_t*, size_t*, \
1859 static bool extract_reset_batch(unsigned, uint8_t*) { return false; } \
1861 static bool reset_batch(size_t GALOIS_UNUSED(begin), \
1862 size_t GALOIS_UNUSED(end)) { \
1866 static bool reduce(uint32_t GALOIS_UNUSED(node_id), struct NodeData& node, \
1867 ValTy y, unsigned vecIndex) { \
1868 node.fieldname[vecIndex] = node.fieldname[vecIndex] + y; \
1872 static bool reduce_batch(unsigned, uint8_t*, size_t, DataCommMode) { \
1876 static bool reduce_mirror_batch(unsigned from_id, uint8_t* y, \
1877 DataCommMode data_mode) { \
1881 static void reset(uint32_t GALOIS_UNUSED(node_id), struct NodeData& node, \
1882 unsigned vecIndex) { \
1883 node.fieldname[vecIndex] = 0; \
1886 static void setVal(uint32_t GALOIS_UNUSED(node_id), struct NodeData& node, \
1887 ValTy y, unsigned vecIndex) { \
1888 node.fieldname[vecIndex] = y; \
1891 static void setVal(uint32_t GALOIS_UNUSED(node_id), \
1892 struct NodeData& GALOIS_UNUSED(node), ValTy y) { \
1893 GALOIS_DIE("execution shouldn't get here; needs index arg"); \
1896 static bool setVal_batch(unsigned, uint8_t*, DataCommMode) { \
1915 #ifdef GALOIS_ENABLE_GPU
1917 #define GALOIS_SYNC_STRUCTURE_BITSET(fieldname) \
1918 struct Bitset_##fieldname { \
1919 static constexpr bool is_vector_bitset() { return false; } \
1920 static bool is_valid() { return true; } \
1922 static galois::DynamicBitSet& get() { \
1923 if (personality == GPU_CUDA) \
1924 get_bitset_##fieldname##_cuda( \
1925 cuda_ctx, (uint64_t*)bitset_##fieldname.get_vec().data()); \
1926 return bitset_##fieldname; \
1929 static void reset_range(size_t begin, size_t end) { \
1930 if (personality == GPU_CUDA) { \
1931 bitset_##fieldname##_reset_cuda(cuda_ctx, begin, end); \
1933 assert(personality == CPU); \
1934 bitset_##fieldname.reset(begin, end); \
1940 #define GALOIS_SYNC_STRUCTURE_BITSET(fieldname) \
1941 struct Bitset_##fieldname { \
1942 static constexpr bool is_vector_bitset() { return false; } \
1944 static constexpr bool is_valid() { return true; } \
1946 static galois::DynamicBitSet& get() { return bitset_##fieldname; } \
1948 static void reset_range(size_t begin, size_t end) { \
1949 bitset_##fieldname.reset(begin, end); \
1965 #define GALOIS_SYNC_STRUCTURE_VECTOR_BITSET(fieldname) \
1966 struct Bitset_##fieldname { \
1967 static unsigned numBitsets() { return vbitset_##fieldname.size(); } \
1969 static constexpr bool is_vector_bitset() { return true; } \
1971 static constexpr bool is_valid() { return true; } \
1973 static galois::DynamicBitSet& get(unsigned i) { \
1974 return vbitset_##fieldname[i]; \
1977 static void reset_range(size_t begin, size_t end) { \
1978 for (unsigned i = 0; i < vbitset_##fieldname.size(); i++) { \
1979 vbitset_##fieldname[i].reset(begin, end); \
1984 #endif // header guard
BITVECTOR_STATUS bitvectorStatus
Status of the bitvector in terms of whether it can be used to sync the field.
Definition: SyncStructures.h:76
bool src_to_src() const
Return true if src2src is set.
Definition: SyncStructures.h:90
void make_dst_invalid(BITVECTOR_STATUS *bv_flag)
Marks destinations invalid on passed in bitvector flag.
Definition: SyncStructures.cpp:54
void clear_read_dst()
Sets read dst flags to false.
Definition: SyncStructures.h:128
void set_write_dst()
Sets write dst flags to true.
Definition: SyncStructures.h:108
sources on bitvector are invalid
Definition: SyncStructures.h:47
destinations on bitvector are invalid
Definition: SyncStructures.h:48
void clear_read_any()
Sets all write flags to false.
Definition: SyncStructures.h:134
Contains the DataCommMode enumeration and a function that chooses a data comm mode based on its arguments.
FieldFlags()
Field Flags constructor.
Definition: SyncStructures.h:81
bool src_invalid(BITVECTOR_STATUS bv_flag)
Return true if the sources are invalid in bitvector flag.
Definition: SyncStructures.cpp:30
void make_src_invalid(BITVECTOR_STATUS *bv_flag)
Marks sources invalid on passed in bitvector flag.
Definition: SyncStructures.cpp:40
bool src_to_dst() const
Return true if src2dst is set.
Definition: SyncStructures.h:93
none of the bitvector is invalid
Definition: SyncStructures.h:46
void set_write_any()
Sets all write flags to true.
Definition: SyncStructures.h:114
void clear_read_src()
Sets read src flags to false.
Definition: SyncStructures.h:122
Definition: SyncStructures.h:49
BITVECTOR_STATUS
Bitvector status enum specifying validness of certain things in bitvector.
Definition: SyncStructures.h:45
Each field has a FieldFlags object that indicates synchronization status of that field.
Definition: SyncStructures.h:65
bool dst_invalid(BITVECTOR_STATUS bv_flag)
Return true if the destinations are invalid in bitvector flag.
Definition: SyncStructures.cpp:35
void clear_all()
Sets all write flags to false and sets bitvector status to none invalid.
Definition: SyncStructures.h:142
bool dst_to_dst() const
Return true if dst2dst is set.
Definition: SyncStructures.h:99
bool dst_to_src() const
Return true if dst2src is set.
Definition: SyncStructures.h:96
void set_write_src()
Sets write src flags to true.
Definition: SyncStructures.h:102