restructure jobq to defer to deque, prevent global queue starvation
Haldean Brown
5 years ago
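In short: each worker now keeps a small bounded local queue (at most max_subqueue_size entries) and everything beyond that goes through a single shared ubik_deque, which replaces the old mutex-guarded global list inside the jobq. Condensed from the hunks below, the push side behaves roughly like this (a sketch, not the committed code; local_enqueue is a hypothetical stand-in for the local-list bookkeeping that ubik_jobq_push does inline):

    #include "ubik/jobq.h"

    /* Sketch of the new push policy; local_enqueue is hypothetical. */
    void
    jobq_push_sketch(struct ubik_jobq *q, size_t worker_id, void *e)
    {
            struct ubik_jobq_subq *sq = q->qs + worker_id;

            /* Once the worker-local queue is full, spill onto the shared
             * deque instead of letting the local queue grow without bound. */
            if (sq->size >= max_subqueue_size)
            {
                    ubik_deque_pushl(&q->d, e);
                    return;
            }
            local_enqueue(sq, e);
    }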
52 | 52 |
53 | 53 | bool
54 | 54 | ubik_deque_empty(struct ubik_deque *d);
55 | |
56 | | - /* Node structs are recycled internally by the deque; this frees all memory
57 | | - * associated with the recycler. Calling this will probably reduce memory usage
58 | | - * of the interpreter momentarily, but usage will eventually grow again when
59 | | - * more elements are enqueued and dequeued. This function, unlike the other
60 | | - * deque functions, is not MT-safe, and cannot be called while any other
61 | | - * threads are attempting to access any deque. */
62 | | - void
63 | | - ubik_deque_empty_recycler();
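The declaration removed above goes away together with its definition in the deque.c hunk further down: deque nodes are no longer pooled, so there is no recycler left to empty at interpreter teardown. What the job queue relies on from here on is the remaining interface. A minimal usage sketch, with signatures as they appear in this diff and one assumption the new ubik_jobq_pop also depends on, namely that popping an empty deque returns NULL:

    #include <assert.h>
    #include "ubik/deque.h"

    /* Usage sketch of the deque interface the jobq now defers to. */
    static void
    deque_usage_sketch(void)
    {
            struct ubik_deque d;
            int job;
            void *got;

            ubik_deque_init(&d);
            ubik_deque_pushl(&d, &job);     /* producers push on the left  */
            got = ubik_deque_popr(&d);      /* consumers pop on the right  */
            assert(got == &job);
            assert(ubik_deque_popr(&d) == NULL);
            assert(ubik_deque_empty(&d));
    }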
27 | 27 | global queue. |
28 | 28 | */ |
29 | 29 | |
| 30 | + #include "ubik/deque.h"
| 31 |
30 | 32 | #include <pthread.h> |
31 | 33 | #include <stdlib.h> |
32 | 34 | |
45 | 47 | struct ubik_jobq_node *tail; |
46 | 48 | struct ubik_jobq_node *recycle; |
47 | 49 | size_t size; |
| 50 |
| 51 | + /* This counts the number of pops we've taken from the local queue
| 52 | + * without checking the global queue. When this hits a threshold value,
| 53 | + * we check the global queue for work. This prevents the evaluator from
| 54 | + * spinning on the local queue while the tasks that are blocking those
| 55 | + * local tasks from being evaluated are sitting on the global queue not
| 56 | + * getting done. */
| 57 | + uint16_t since_global_check;
48 | 58 | }; |
49 | 59 | |
50 | 60 | struct ubik_jobq |
51 | 61 | { |
| 62 | + struct ubik_deque d;
52 | 63 | struct ubik_jobq_subq *qs;
53 | | - struct ubik_jobq_node *global_head;
54 | | - struct ubik_jobq_node *global_tail;
55 | | - pthread_mutex_t global_lock;
56 | 64 | size_t n_queues; |
57 | 65 | }; |
58 | 66 |
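The since_global_check field added above is what implements the behaviour its comment describes: every CHECK_GLOBAL_PERIOD pops, a worker consults the shared deque even if it still has local work, so tasks parked on the global queue cannot sit there indefinitely while the work that depends on them spins in a local queue. Condensed from the jobq.c hunk below (a sketch; local_dequeue is a hypothetical stand-in for the inline local-list pop):

    #include "ubik/jobq.h"

    /* Sketch of the new pop policy; see the jobq.c hunk below for the real
     * code. CHECK_GLOBAL_PERIOD is the constant defined in jobq.c. */
    void *
    jobq_pop_sketch(struct ubik_jobq *q, size_t worker_id)
    {
            struct ubik_jobq_subq *sq = q->qs + worker_id;
            void *elem;

            sq->since_global_check++;
            if (sq->since_global_check == CHECK_GLOBAL_PERIOD)
            {
                    /* periodic look at the shared deque, even when local
                     * work remains, so globally-queued tasks make progress */
                    sq->since_global_check = 0;
                    elem = ubik_deque_popr(&q->d);
                    if (elem != NULL)
                            return elem;
            }
            if (sq->size > 0)
                    return local_dequeue(sq);

            /* local queue exhausted: fall back to the shared deque */
            sq->since_global_check = 0;
            return ubik_deque_popr(&q->d);
    }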
594 | 594 | ets = (struct eval_thread_state *) e; |
595 | 595 | evaluator = ets->e; |
596 | 596 | wid = ets->id; |
| 597 | + free(ets);
| 598 |
597 | 599 | err = OK; |
598 | 600 | |
599 | 601 | while (!evaluator->die) |
21 | 21 | #include "ubik/jobq.h" |
22 | 22 | #include "ubik/util.h" |
23 | 23 | |
| 24 | + #define CHECK_GLOBAL_PERIOD 20
| 25 |
24 | 26 | static const size_t max_subqueue_size = 32; |
25 | 27 | |
26 | 28 | void |
27 | 29 | ubik_jobq_init(struct ubik_jobq *q, size_t n_workers) |
28 | 30 | { |
29 | | - pthread_mutex_init(&q->global_lock, NULL);
| 31 | + ubik_deque_init(&q->d);
30 | 32 | ubik_galloc((void**) &q->qs, n_workers, sizeof(struct ubik_jobq_subq)); |
31 | 33 | q->n_queues = n_workers; |
32 | | - q->global_head = NULL;
33 | | - q->global_tail = NULL;
34 | 34 | } |
35 | 35 | |
36 | 36 | void |
40 | 40 | struct ubik_jobq_node *n; |
41 | 41 | |
42 | 42 | sq = q->qs + worker_id; |
| 43 |
| 44 | + if (unlikely(sq->size >= max_subqueue_size))
| 45 | + {
| 46 | + ubik_deque_pushl(&q->d, e);
| 47 | + return;
| 48 | + }
| 49 |
43 | 50 | if (sq->recycle != NULL) |
44 | 51 | { |
45 | 52 | n = sq->recycle; |
52 | 59 | } |
53 | 60 | n->elem = e; |
54 | 61 | |
55 | | - if (likely(sq->size < max_subqueue_size))
56 | | - {
57 | | - n->right = sq->tail;
58 | | - if (sq->tail != NULL)
59 | | - sq->tail->left = n;
60 | | - else
61 | | - sq->head = n;
62 | | - sq->tail = n;
63 | | - sq->size++;
64 | | - return;
65 | | - }
66 | |
67 | | - pthread_mutex_lock(&q->global_lock);
68 | | - n->right = q->global_tail;
69 | | - if (q->global_tail != NULL)
70 | | - q->global_tail->left = n;
71 | | - if (q->global_head == NULL)
72 | | - q->global_head = n;
73 | | - q->global_tail = n;
74 | | - pthread_mutex_unlock(&q->global_lock);
| 62 | + n->right = sq->tail;
| 63 | + if (sq->tail != NULL)
| 64 | + sq->tail->left = n;
| 65 | + else
| 66 | + sq->head = n;
| 67 | + sq->tail = n;
| 68 | + sq->size++;
75 | 69 | } |
76 | 70 | |
77 | 71 | void * |
83 | 77 | |
84 | 78 | sq = q->qs + worker_id; |
85 | 79 | n = NULL; |
| 80 | + sq->since_global_check++;
86 | 81 | |
87 | | - if (likely(sq->size > 0))
| 82 | + if (sq->since_global_check == CHECK_GLOBAL_PERIOD)
| 83 | + {
| 84 | + elem = ubik_deque_popr(&q->d);
| 85 | + sq->since_global_check = 0;
| 86 | + if (elem != NULL)
| 87 | + return elem;
| 88 | + }
| 89 |
| 90 | + if (sq->size > 0)
88 | 91 | { |
89 | 92 | n = sq->head; |
90 | 93 | if (n != NULL) |
95 | 98 | else |
96 | 99 | sq->head->right = NULL; |
97 | 100 | sq->size--; |
| 101 |
| 102 | + elem = n->elem;
| 103 |
| 104 | + n->elem = NULL;
| 105 | + n->left = NULL;
| 106 | + n->right = sq->recycle;
| 107 | + sq->recycle = n;
| 108 |
| 109 | + return elem;
98 | 110 | } |
99 | 111 | } |
100 | | - if (n == NULL)
101 | | - {
102 | | - pthread_mutex_lock(&q->global_lock);
103 | | - n = q->global_head;
104 | | - if (n != NULL)
105 | | - q->global_head = n->left;
106 | | - if (q->global_head == NULL)
107 | | - q->global_tail = NULL;
108 | | - else
109 | | - q->global_head->right = NULL;
110 | | - pthread_mutex_unlock(&q->global_lock);
111 | | - }
112 | | - if (n == NULL)
113 | | - return NULL;
114 | 112 | |
115 | | - elem = n->elem;
116 | |
117 | | - n->elem = NULL;
118 | | - n->left = NULL;
119 | | - n->right = sq->recycle;
120 | | - sq->recycle = n;
121 | |
122 | | - return elem;
| 113 | + sq->since_global_check = 0;
| 114 | + return ubik_deque_popr(&q->d);
123 | 115 | } |
124 | 116 | |
125 | 117 | static void |
148 | 140 | free_ll(&sq->tail); |
149 | 141 | free_ll(&sq->recycle); |
150 | 142 | } |
151 | | - free_ll(&q->global_tail);
152 | 143 | free(q->qs); |
153 | 144 | } |
154 | 145 |
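Putting the two halves together, a worker's consumption loop over this queue looks roughly like the following. This is a hypothetical fragment, not code from this commit: the evaluator->q field name, run_one_task, and the idle handling are stand-ins; the real loop is the one visible in the evaluator hunk above.

    /* Hypothetical worker-loop fragment; the names noted above are assumptions. */
    while (!evaluator->die)
    {
            void *task;

            task = ubik_jobq_pop(&evaluator->q, wid);
            if (task == NULL)
            {
                    /* no local or global work right now */
                    continue;
            }
            run_one_task(evaluator, task);
    }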
21 | 21 | |
22 | 22 | #include <stdatomic.h> |
23 | 23 | |
24 | | - /* Node recycling; this becomes a linked list of free nodes available for use. */
25 | | - static struct ubik_deque_elem *elem_pool = NULL;
26 | |
27 | | - static inline struct ubik_deque_elem *
28 | | - obtain()
29 | | - {
30 | | - struct ubik_deque_elem *e;
31 | |
32 | | - for (;;)
33 | | - {
34 | | - e = atomic_load(&elem_pool);
35 | | - if (e == NULL)
36 | | - {
37 | | - e = calloc(1, sizeof(struct ubik_deque_elem));
38 | | - ubik_assert(e != NULL);
39 | | - return e;
40 | | - }
41 | | - if (atomic_compare_exchange_weak(&elem_pool, &e, e->right))
42 | | - return e;
43 | | - }
44 | | - }
45 | |
46 | | - static inline void
47 | | - recycle(struct ubik_deque_elem *e)
48 | | - {
49 | | - struct ubik_deque_elem *current;
50 | |
51 | | - for (;;)
52 | | - {
53 | | - current = elem_pool;
54 | | - if (atomic_compare_exchange_weak(&elem_pool, &current, e))
55 | | - {
56 | | - e->right = current;
57 | | - return;
58 | | - }
59 | | - }
60 | | - }
61 | |
62 | | - void
63 | | - ubik_deque_empty_recycler()
64 | | - {
65 | | - struct ubik_deque_elem *e;
66 | |
67 | | - while (elem_pool != NULL)
68 | | - {
69 | | - e = elem_pool;
70 | | - elem_pool = e->right;
71 | | - free(e);
72 | | - }
73 | | - }
74 | |
75 | 24 | void |
76 | 25 | ubik_deque_init(struct ubik_deque *d) |
77 | 26 | { |
78 | 27 | pthread_mutex_init(&d->lock, NULL); |
| 28 | + d->left = NULL;
| 29 | + d->right = NULL;
79 | 30 | } |
80 | 31 | |
81 | 32 | void |
82 | 33 | ubik_deque_pushl(struct ubik_deque *d, void *e) |
83 | 34 | { |
84 | 35 | struct ubik_deque_elem *elem; |
85 | | - elem = obtain();
| 36 |
| 37 | + ubik_galloc((void**) &elem, 1, sizeof(struct ubik_deque_elem));
86 | 38 | |
87 | 39 | pthread_mutex_lock(&d->lock); |
88 | 40 | elem->e = e; |
100 | 52 | ubik_deque_pushr(struct ubik_deque *d, void *e) |
101 | 53 | { |
102 | 54 | struct ubik_deque_elem *elem; |
103 | | - elem = obtain();
| 55 |
| 56 | + ubik_galloc((void**) &elem, 1, sizeof(struct ubik_deque_elem));
104 | 57 | |
105 | 58 | pthread_mutex_lock(&d->lock); |
106 | 59 | elem->e = e; |
136 | 89 | pthread_mutex_unlock(&d->lock); |
137 | 90 | |
138 | 91 | v = e->e; |
139 | | - recycle(e);
| 92 | + free(e);
140 | 93 | return v; |
141 | 94 | } |
142 | 95 | |
162 | 115 | pthread_mutex_unlock(&d->lock); |
163 | 116 | |
164 | 117 | v = e->e; |
165 | | - recycle(e);
| 118 | + free(e);
166 | 119 | return v; |
167 | 120 | } |
168 | 121 |
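Only fragments of the deque push and pop bodies appear in the hunk above. After this change every push allocates a fresh ubik_deque_elem with ubik_galloc and every pop frees it once the value has been read out. The right-hand pop ends up shaped roughly like the sketch below; this is a reconstruction from the visible context lines, not the committed source, and the elem->left link and the exact unlinking order are assumptions.

    #include <pthread.h>
    #include <stdlib.h>
    #include "ubik/deque.h"

    /* Reconstruction sketch of the post-change right-hand pop. */
    void *
    deque_popr_sketch(struct ubik_deque *d)
    {
            struct ubik_deque_elem *e;
            void *v;

            pthread_mutex_lock(&d->lock);
            e = d->right;
            if (e == NULL)
            {
                    pthread_mutex_unlock(&d->lock);
                    return NULL;
            }
            d->right = e->left;             /* assumed link orientation */
            if (d->right != NULL)
                    d->right->right = NULL;
            else
                    d->left = NULL;
            pthread_mutex_unlock(&d->lock);

            v = e->e;
            free(e);        /* one heap element per push now; the lock-free
                             * recycler removed above is no longer used */
            return v;
    }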
118 | 118 | return err; |
119 | 119 | |
120 | 120 | ubik_hooks_teardown(); |
121 | | - ubik_deque_empty_recycler();
122 | 121 | return OK; |
123 | 122 | } |
27 | 27 | int x, y; |
28 | 28 | |
29 | 29 | ubik_jobq_init(&q, 2); |
| 30 | + assert(ubik_deque_empty(&q.d));
30 | 31 | |
31 | 32 | ubik_jobq_push(&q, 0, &x); |
32 | 33 | ubik_jobq_push(&q, 1, &y); |
33 | 34 | assert(q.qs[0].size == 1); |
34 | 35 | assert(q.qs[1].size == 1); |
35 | | - assert(q.global_head == NULL);
| 36 | + assert(ubik_deque_empty(&q.d));
36 | 37 | |
37 | 38 | assert(ubik_jobq_pop(&q, 0) == &x); |
38 | 39 | assert(ubik_jobq_pop(&q, 0) == NULL); |
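Not part of this commit, but a natural extra check given the new overflow behaviour: pushing more than max_subqueue_size (32) items through one worker should spill into the shared deque, and draining the queue should then hand every item back exactly once. A hypothetical extension of the test above:

    struct ubik_jobq q2;
    int vals[40];
    size_t i, popped;

    ubik_jobq_init(&q2, 2);
    for (i = 0; i < 40; i++)
            ubik_jobq_push(&q2, 0, &vals[i]);
    /* worker 0's local queue is capped at 32; the rest spilled to the deque */
    assert(q2.qs[0].size == 32);
    assert(!ubik_deque_empty(&q2.d));

    popped = 0;
    while (ubik_jobq_pop(&q2, 0) != NULL)
            popped++;
    assert(popped == 40);
    assert(ubik_deque_empty(&q2.d));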