Line data Source code
1 : /*
2 : Unix SMB/CIFS implementation.
3 : global locks based on dbwrap and messaging
4 : Copyright (C) 2009 by Volker Lendecke
5 :
6 : This program is free software; you can redistribute it and/or modify
7 : it under the terms of the GNU General Public License as published by
8 : the Free Software Foundation; either version 3 of the License, or
9 : (at your option) any later version.
10 :
11 : This program is distributed in the hope that it will be useful,
12 : but WITHOUT ANY WARRANTY; without even the implied warranty of
13 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 : GNU General Public License for more details.
15 :
16 : You should have received a copy of the GNU General Public License
17 : along with this program. If not, see <http://www.gnu.org/licenses/>.
18 : */
19 :
20 : #include "replace.h"
21 : #include "system/filesys.h"
22 : #include "lib/util/server_id.h"
23 : #include "lib/util/debug.h"
24 : #include "lib/util/talloc_stack.h"
25 : #include "lib/util/samba_util.h"
26 : #include "lib/util_path.h"
27 : #include "dbwrap/dbwrap.h"
28 : #include "dbwrap/dbwrap_open.h"
29 : #include "dbwrap/dbwrap_watch.h"
30 : #include "g_lock.h"
31 : #include "util_tdb.h"
32 : #include "../lib/util/tevent_ntstatus.h"
33 : #include "messages.h"
34 : #include "serverid.h"
35 :
36 : struct g_lock_ctx {
37 : struct db_context *db;
38 : struct messaging_context *msg;
39 : enum dbwrap_lock_order lock_order;
40 : };
41 :
42 : struct g_lock {
43 : struct server_id exclusive;
44 : size_t num_shared;
45 : uint8_t *shared;
46 : uint64_t unique_lock_epoch;
47 : uint64_t unique_data_epoch;
48 : size_t datalen;
49 : uint8_t *data;
50 : };
51 :
52 178532 : static bool g_lock_parse(uint8_t *buf, size_t buflen, struct g_lock *lck)
53 : {
54 : struct server_id exclusive;
55 : size_t num_shared, shared_len;
56 : uint64_t unique_lock_epoch;
57 : uint64_t unique_data_epoch;
58 :
59 178532 : if (buflen < (SERVER_ID_BUF_LENGTH + /* exclusive */
60 : sizeof(uint64_t) + /* seqnum */
61 : sizeof(uint32_t))) { /* num_shared */
62 27057 : struct g_lock ret = {
63 : .exclusive.pid = 0,
64 9019 : .unique_lock_epoch = generate_unique_u64(0),
65 9019 : .unique_data_epoch = generate_unique_u64(0),
66 : };
67 9019 : *lck = ret;
68 9019 : return true;
69 : }
70 :
71 169513 : server_id_get(&exclusive, buf);
72 169513 : buf += SERVER_ID_BUF_LENGTH;
73 169513 : buflen -= SERVER_ID_BUF_LENGTH;
74 :
75 169513 : unique_lock_epoch = BVAL(buf, 0);
76 169513 : buf += sizeof(uint64_t);
77 169513 : buflen -= sizeof(uint64_t);
78 :
79 169513 : unique_data_epoch = BVAL(buf, 0);
80 169513 : buf += sizeof(uint64_t);
81 169513 : buflen -= sizeof(uint64_t);
82 :
83 169513 : num_shared = IVAL(buf, 0);
84 169513 : buf += sizeof(uint32_t);
85 169513 : buflen -= sizeof(uint32_t);
86 :
87 169513 : if (num_shared > buflen/SERVER_ID_BUF_LENGTH) {
88 0 : DBG_DEBUG("num_shared=%zu, buflen=%zu\n",
89 : num_shared,
90 : buflen);
91 0 : return false;
92 : }
93 :
94 169513 : shared_len = num_shared * SERVER_ID_BUF_LENGTH;
95 :
96 169513 : *lck = (struct g_lock) {
97 : .exclusive = exclusive,
98 : .num_shared = num_shared,
99 : .shared = buf,
100 : .unique_lock_epoch = unique_lock_epoch,
101 : .unique_data_epoch = unique_data_epoch,
102 169513 : .datalen = buflen-shared_len,
103 169513 : .data = buf+shared_len,
104 : };
105 :
106 169513 : return true;
107 : }
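/*
 * For reference, the record layout that g_lock_parse() consumes
 * (field sizes taken from the reads above):
 *
 *   [exclusive holder]   SERVER_ID_BUF_LENGTH bytes
 *   [unique_lock_epoch]  8 bytes
 *   [unique_data_epoch]  8 bytes
 *   [num_shared]         4 bytes
 *   [shared holders]     num_shared * SERVER_ID_BUF_LENGTH bytes
 *   [user data]          remaining bytes
 *
 * A record shorter than the fixed header parses as "no lock held".
 */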
108 :
109 2 : static void g_lock_get_shared(const struct g_lock *lck,
110 : size_t i,
111 : struct server_id *shared)
112 : {
113 2 : if (i >= lck->num_shared) {
114 0 : abort();
115 : }
116 2 : server_id_get(shared, lck->shared + i*SERVER_ID_BUF_LENGTH);
117 2 : }
118 :
119 2 : static void g_lock_del_shared(struct g_lock *lck, size_t i)
120 : {
121 2 : if (i >= lck->num_shared) {
122 0 : abort();
123 : }
124 2 : lck->num_shared -= 1;
125 2 : if (i < lck->num_shared) {
126 0 : memcpy(lck->shared + i*SERVER_ID_BUF_LENGTH,
127 0 : lck->shared + lck->num_shared*SERVER_ID_BUF_LENGTH,
128 : SERVER_ID_BUF_LENGTH);
129 : }
130 2 : }
131 :
132 86455 : static NTSTATUS g_lock_store(
133 : struct db_record *rec,
134 : struct g_lock *lck,
135 : struct server_id *new_shared,
136 : const TDB_DATA *new_dbufs,
137 : size_t num_new_dbufs)
138 86455 : {
139 : uint8_t exclusive[SERVER_ID_BUF_LENGTH];
140 : uint8_t seqnum_buf[sizeof(uint64_t)*2];
141 : uint8_t sizebuf[sizeof(uint32_t)];
142 : uint8_t new_shared_buf[SERVER_ID_BUF_LENGTH];
143 :
144 86455 : struct TDB_DATA dbufs[6 + num_new_dbufs];
145 :
146 86455 : dbufs[0] = (TDB_DATA) {
147 : .dptr = exclusive, .dsize = sizeof(exclusive),
148 : };
149 86455 : dbufs[1] = (TDB_DATA) {
150 : .dptr = seqnum_buf, .dsize = sizeof(seqnum_buf),
151 : };
152 86455 : dbufs[2] = (TDB_DATA) {
153 : .dptr = sizebuf, .dsize = sizeof(sizebuf),
154 : };
155 86455 : dbufs[3] = (TDB_DATA) {
156 86455 : .dptr = lck->shared,
157 86455 : .dsize = lck->num_shared * SERVER_ID_BUF_LENGTH,
158 : };
159 86455 : dbufs[4] = (TDB_DATA) { 0 };
160 86455 : dbufs[5] = (TDB_DATA) {
161 86455 : .dptr = lck->data, .dsize = lck->datalen,
162 : };
163 :
164 86455 : if (num_new_dbufs != 0) {
165 43336 : memcpy(&dbufs[6],
166 : new_dbufs,
167 : num_new_dbufs * sizeof(TDB_DATA));
168 : }
169 :
170 86455 : server_id_put(exclusive, lck->exclusive);
171 86455 : SBVAL(seqnum_buf, 0, lck->unique_lock_epoch);
172 86455 : SBVAL(seqnum_buf, 8, lck->unique_data_epoch);
173 :
174 86455 : if (new_shared != NULL) {
175 2 : if (lck->num_shared >= UINT32_MAX) {
176 0 : return NT_STATUS_BUFFER_OVERFLOW;
177 : }
178 :
179 2 : server_id_put(new_shared_buf, *new_shared);
180 :
181 2 : dbufs[4] = (TDB_DATA) {
182 : .dptr = new_shared_buf,
183 : .dsize = sizeof(new_shared_buf),
184 : };
185 :
186 2 : lck->num_shared += 1;
187 : }
188 :
189 86455 : SIVAL(sizebuf, 0, lck->num_shared);
190 :
191 86455 : return dbwrap_record_storev(rec, dbufs, ARRAY_SIZE(dbufs), 0);
192 : }
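/*
 * The dbufs vector above serializes exactly the layout that
 * g_lock_parse() reads back: dbufs[0] is the exclusive holder,
 * dbufs[1] both epochs, dbufs[2] the shared count, dbufs[3] the
 * existing shared holders, dbufs[4] an optional new shared holder,
 * dbufs[5] the existing payload, and dbufs[6..] any new payload
 * buffers passed by the caller.
 */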
193 :
194 134 : struct g_lock_ctx *g_lock_ctx_init_backend(
195 : TALLOC_CTX *mem_ctx,
196 : struct messaging_context *msg,
197 : struct db_context **backend)
198 : {
199 : struct g_lock_ctx *result;
200 :
201 134 : result = talloc(mem_ctx, struct g_lock_ctx);
202 134 : if (result == NULL) {
203 0 : return NULL;
204 : }
205 134 : result->msg = msg;
206 134 : result->lock_order = DBWRAP_LOCK_ORDER_NONE;
207 :
208 134 : result->db = db_open_watched(result, backend, msg);
209 134 : if (result->db == NULL) {
210 0 : DBG_WARNING("db_open_watched failed\n");
211 0 : TALLOC_FREE(result);
212 0 : return NULL;
213 : }
214 134 : return result;
215 : }
216 :
217 73 : void g_lock_set_lock_order(struct g_lock_ctx *ctx,
218 : enum dbwrap_lock_order lock_order)
219 : {
220 73 : ctx->lock_order = lock_order;
221 73 : }
222 :
223 61 : struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
224 : struct messaging_context *msg)
225 : {
226 61 : char *db_path = NULL;
227 61 : struct db_context *backend = NULL;
228 61 : struct g_lock_ctx *ctx = NULL;
229 :
230 61 : db_path = lock_path(mem_ctx, "g_lock.tdb");
231 61 : if (db_path == NULL) {
232 0 : return NULL;
233 : }
234 :
235 61 : backend = db_open(
236 : mem_ctx,
237 : db_path,
238 : 0,
239 : TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH|TDB_VOLATILE,
240 : O_RDWR|O_CREAT,
241 : 0600,
242 : DBWRAP_LOCK_ORDER_3,
243 : DBWRAP_FLAG_NONE);
244 61 : TALLOC_FREE(db_path);
245 61 : if (backend == NULL) {
246 0 : DBG_WARNING("Could not open g_lock.tdb\n");
247 0 : return NULL;
248 : }
249 :
250 61 : ctx = g_lock_ctx_init_backend(mem_ctx, msg, &backend);
251 61 : return ctx;
252 : }
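/*
 * A minimal setup sketch, assuming an already initialized messaging
 * context "msg" (hypothetical caller, error handling trimmed):
 *
 *   struct g_lock_ctx *ctx = g_lock_ctx_init(talloc_tos(), msg);
 *   if (ctx == NULL) {
 *           return NT_STATUS_NO_MEMORY;
 *   }
 *   g_lock_set_lock_order(ctx, DBWRAP_LOCK_ORDER_1);
 */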
253 :
254 1365 : static void g_lock_cleanup_dead(
255 : struct g_lock *lck,
256 : struct server_id *dead_blocker)
257 : {
258 : bool exclusive_died;
259 : struct server_id_buf tmp;
260 :
261 1365 : if (dead_blocker == NULL) {
262 1365 : return;
263 : }
264 :
265 0 : exclusive_died = server_id_equal(dead_blocker, &lck->exclusive);
266 :
267 0 : if (exclusive_died) {
268 0 : DBG_DEBUG("Exclusive holder %s died\n",
269 : server_id_str_buf(lck->exclusive, &tmp));
270 0 : lck->exclusive.pid = 0;
271 : }
272 :
273 0 : if (lck->num_shared != 0) {
274 : bool shared_died;
275 : struct server_id shared;
276 :
277 0 : g_lock_get_shared(lck, 0, &shared);
278 0 : shared_died = server_id_equal(dead_blocker, &shared);
279 :
280 0 : if (shared_died) {
281 0 : DBG_DEBUG("Shared holder %s died\n",
282 : server_id_str_buf(shared, &tmp));
283 0 : g_lock_del_shared(lck, 0);
284 : }
285 : }
286 : }
287 :
288 899 : static ssize_t g_lock_find_shared(
289 : struct g_lock *lck,
290 : const struct server_id *self)
291 : {
292 : size_t i;
293 :
294 899 : for (i=0; i<lck->num_shared; i++) {
295 : struct server_id shared;
296 : bool same;
297 :
298 0 : g_lock_get_shared(lck, i, &shared);
299 :
300 0 : same = server_id_equal(self, &shared);
301 0 : if (same) {
302 0 : return i;
303 : }
304 : }
305 :
306 899 : return -1;
307 : }
308 :
309 899 : static void g_lock_cleanup_shared(struct g_lock *lck)
310 : {
311 : size_t i;
312 : struct server_id check;
313 : bool exists;
314 :
315 899 : if (lck->num_shared == 0) {
316 899 : return;
317 : }
318 :
319 : /*
320 :          * Read locks can stay around forever if their holder
321 :          * dies. As a heuristic, check one randomly chosen
322 :          * holder for existence; hopefully this keeps runaway
323 :          * read locks under control.
324 : */
325 0 : i = generate_random() % lck->num_shared;
326 0 : g_lock_get_shared(lck, i, &check);
327 :
328 0 : exists = serverid_exists(&check);
329 0 : if (!exists) {
330 : struct server_id_buf tmp;
331 0 : DBG_DEBUG("Shared locker %s died -- removing\n",
332 : server_id_str_buf(check, &tmp));
333 0 : g_lock_del_shared(lck, i);
334 : }
335 : }
336 :
337 : struct g_lock_lock_state {
338 : struct tevent_context *ev;
339 : struct g_lock_ctx *ctx;
340 : TDB_DATA key;
341 : enum g_lock_type type;
342 : bool retry;
343 : };
344 :
345 : struct g_lock_lock_fn_state {
346 : struct g_lock_lock_state *req_state;
347 : struct server_id *dead_blocker;
348 :
349 : struct tevent_req *watch_req;
350 : uint64_t watch_instance;
351 : NTSTATUS status;
352 : };
353 :
354 : static int g_lock_lock_state_destructor(struct g_lock_lock_state *s);
355 :
356 1365 : static NTSTATUS g_lock_trylock(
357 : struct db_record *rec,
358 : struct g_lock_lock_fn_state *state,
359 : TDB_DATA data,
360 : struct server_id *blocker)
361 : {
362 1365 : struct g_lock_lock_state *req_state = state->req_state;
363 1365 : struct server_id self = messaging_server_id(req_state->ctx->msg);
364 1365 : enum g_lock_type type = req_state->type;
365 1365 : bool retry = req_state->retry;
366 1365 : struct g_lock lck = { .exclusive.pid = 0 };
367 : size_t orig_num_shared;
368 : struct server_id_buf tmp;
369 : NTSTATUS status;
370 : bool ok;
371 :
372 1365 : ok = g_lock_parse(data.dptr, data.dsize, &lck);
373 1365 : if (!ok) {
374 0 : dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
375 0 : DBG_DEBUG("g_lock_parse failed\n");
376 0 : return NT_STATUS_INTERNAL_DB_CORRUPTION;
377 : }
378 1365 : orig_num_shared = lck.num_shared;
379 :
380 1365 : g_lock_cleanup_dead(&lck, state->dead_blocker);
381 :
382 1365 : lck.unique_lock_epoch = generate_unique_u64(lck.unique_lock_epoch);
383 :
384 1365 : if (lck.exclusive.pid != 0) {
385 464 : bool self_exclusive = server_id_equal(&self, &lck.exclusive);
386 :
387 464 : if (!self_exclusive) {
388 464 : bool exists = serverid_exists(&lck.exclusive);
389 464 : if (!exists) {
390 0 : lck.exclusive = (struct server_id) { .pid=0 };
391 0 : goto noexclusive;
392 : }
393 :
394 464 : DBG_DEBUG("%s has an exclusive lock\n",
395 : server_id_str_buf(lck.exclusive, &tmp));
396 :
397 464 : if (type == G_LOCK_DOWNGRADE) {
398 : struct server_id_buf tmp2;
399 :
400 0 : dbwrap_watched_watch_remove_instance(rec,
401 : state->watch_instance);
402 :
403 0 : DBG_DEBUG("%s: Trying to downgrade %s\n",
404 : server_id_str_buf(self, &tmp),
405 : server_id_str_buf(
406 : lck.exclusive, &tmp2));
407 0 : return NT_STATUS_NOT_LOCKED;
408 : }
409 :
410 464 : if (type == G_LOCK_UPGRADE) {
411 : ssize_t shared_idx;
412 :
413 0 : dbwrap_watched_watch_remove_instance(rec,
414 : state->watch_instance);
415 :
416 0 : shared_idx = g_lock_find_shared(&lck, &self);
417 :
418 0 : if (shared_idx == -1) {
419 0 : DBG_DEBUG("Trying to upgrade %s "
420 : "without "
421 : "existing shared lock\n",
422 : server_id_str_buf(
423 : self, &tmp));
424 0 : return NT_STATUS_NOT_LOCKED;
425 : }
426 :
427 : /*
428 :                          * We're trying to upgrade, and the
429 :                          * exclusive lock is taken by someone
430 :                          * else. This means that someone else
431 :                          * is waiting for us to give up our
432 :                          * shared lock. If we now also wait
433 :                          * for someone else to give up their
434 :                          * shared lock, we will deadlock.
435 : */
436 :
437 0 : DBG_DEBUG("Trying to upgrade %s while "
438 : "someone else is also "
439 : "trying to upgrade\n",
440 : server_id_str_buf(self, &tmp));
441 0 : return NT_STATUS_POSSIBLE_DEADLOCK;
442 : }
443 :
444 464 : DBG_DEBUG("Waiting for lck.exclusive=%s\n",
445 : server_id_str_buf(lck.exclusive, &tmp));
446 :
447 : /*
448 : * We will return NT_STATUS_LOCK_NOT_GRANTED
449 : * and need to monitor the record.
450 : *
451 : * If we don't have a watcher instance yet,
452 : * we should add one.
453 : */
454 464 : if (state->watch_instance == 0) {
455 407 : state->watch_instance =
456 407 : dbwrap_watched_watch_add_instance(rec);
457 : }
458 :
459 464 : *blocker = lck.exclusive;
460 464 : return NT_STATUS_LOCK_NOT_GRANTED;
461 : }
462 :
463 0 : if (type == G_LOCK_DOWNGRADE) {
464 0 : DBG_DEBUG("Downgrading %s from WRITE to READ\n",
465 : server_id_str_buf(self, &tmp));
466 :
467 0 : lck.exclusive = (struct server_id) { .pid = 0 };
468 0 : goto do_shared;
469 : }
470 :
471 0 : if (!retry) {
472 0 : dbwrap_watched_watch_remove_instance(rec,
473 : state->watch_instance);
474 :
475 0 : DBG_DEBUG("%s already locked by self\n",
476 : server_id_str_buf(self, &tmp));
477 0 : return NT_STATUS_WAS_LOCKED;
478 : }
479 :
480 0 : g_lock_cleanup_shared(&lck);
481 :
482 0 : if (lck.num_shared != 0) {
483 0 : g_lock_get_shared(&lck, 0, blocker);
484 :
485 0 : DBG_DEBUG("Continue waiting for shared lock %s\n",
486 : server_id_str_buf(*blocker, &tmp));
487 :
488 : /*
489 : * We will return NT_STATUS_LOCK_NOT_GRANTED
490 : * and need to monitor the record.
491 : *
492 : * If we don't have a watcher instance yet,
493 : * we should add one.
494 : */
495 0 : if (state->watch_instance == 0) {
496 0 : state->watch_instance =
497 0 : dbwrap_watched_watch_add_instance(rec);
498 : }
499 :
500 0 : return NT_STATUS_LOCK_NOT_GRANTED;
501 : }
502 :
503 : /*
504 : * All pending readers are gone and we no longer need
505 : * to monitor the record.
506 : */
507 0 : dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
508 :
509 0 : if (orig_num_shared != lck.num_shared) {
510 0 : status = g_lock_store(rec, &lck, NULL, NULL, 0);
511 0 : if (!NT_STATUS_IS_OK(status)) {
512 0 : DBG_DEBUG("g_lock_store() failed: %s\n",
513 : nt_errstr(status));
514 0 : return status;
515 : }
516 : }
517 :
518 0 : talloc_set_destructor(req_state, NULL);
519 :
520 : /*
521 : * Retry after a conflicting lock was released
522 : */
523 0 : return NT_STATUS_OK;
524 : }
525 :
526 901 : noexclusive:
527 :
528 901 : if (type == G_LOCK_UPGRADE) {
529 0 : ssize_t shared_idx = g_lock_find_shared(&lck, &self);
530 :
531 0 : if (shared_idx == -1) {
532 0 : dbwrap_watched_watch_remove_instance(rec,
533 : state->watch_instance);
534 :
535 0 : DBG_DEBUG("Trying to upgrade %s without "
536 : "existing shared lock\n",
537 : server_id_str_buf(self, &tmp));
538 0 : return NT_STATUS_NOT_LOCKED;
539 : }
540 :
541 0 : g_lock_del_shared(&lck, shared_idx);
542 0 : type = G_LOCK_WRITE;
543 : }
544 :
545 901 : if (type == G_LOCK_WRITE) {
546 899 : ssize_t shared_idx = g_lock_find_shared(&lck, &self);
547 :
548 899 : if (shared_idx != -1) {
549 0 : dbwrap_watched_watch_remove_instance(rec,
550 : state->watch_instance);
551 0 : DBG_DEBUG("Trying to writelock existing shared %s\n",
552 : server_id_str_buf(self, &tmp));
553 0 : return NT_STATUS_WAS_LOCKED;
554 : }
555 :
556 899 : lck.exclusive = self;
557 :
558 899 : g_lock_cleanup_shared(&lck);
559 :
560 899 : if (lck.num_shared == 0) {
561 : /*
562 :                          * If we store ourselves as the exclusive writer,
563 :                          * without any pending readers, we don't
564 :                          * need to monitor the record anymore.
565 : */
566 899 : dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
567 0 : } else if (state->watch_instance == 0) {
568 : /*
569 : * Here we have lck.num_shared != 0.
570 : *
571 : * We will return NT_STATUS_LOCK_NOT_GRANTED
572 : * below.
573 : *
574 :                          * And we don't have a watcher instance yet!
575 : *
576 : * We add it here before g_lock_store()
577 : * in order to trigger just one
578 :                          * low-level dbwrap_do_locked() call.
579 : */
580 0 : state->watch_instance =
581 0 : dbwrap_watched_watch_add_instance(rec);
582 : }
583 :
584 899 : status = g_lock_store(rec, &lck, NULL, NULL, 0);
585 899 : if (!NT_STATUS_IS_OK(status)) {
586 0 : DBG_DEBUG("g_lock_store() failed: %s\n",
587 : nt_errstr(status));
588 0 : return status;
589 : }
590 :
591 899 : if (lck.num_shared != 0) {
592 0 : talloc_set_destructor(
593 : req_state, g_lock_lock_state_destructor);
594 :
595 0 : g_lock_get_shared(&lck, 0, blocker);
596 :
597 0 : DBG_DEBUG("Waiting for %zu shared locks, "
598 : "picking blocker %s\n",
599 : lck.num_shared,
600 : server_id_str_buf(*blocker, &tmp));
601 :
602 0 : return NT_STATUS_LOCK_NOT_GRANTED;
603 : }
604 :
605 899 : talloc_set_destructor(req_state, NULL);
606 :
607 899 : return NT_STATUS_OK;
608 : }
609 :
610 2 : do_shared:
611 :
612 : /*
613 :          * We are going to store ourselves as a reader,
614 :          * so we got what we were waiting for.
615 :          *
616 :          * Hence we no longer need to monitor the
617 :          * record.
618 : */
619 2 : dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
620 :
621 2 : if (lck.num_shared == 0) {
622 2 : status = g_lock_store(rec, &lck, &self, NULL, 0);
623 2 : if (!NT_STATUS_IS_OK(status)) {
624 0 : DBG_DEBUG("g_lock_store() failed: %s\n",
625 : nt_errstr(status));
626 : }
627 :
628 2 : return status;
629 : }
630 :
631 0 : g_lock_cleanup_shared(&lck);
632 :
633 0 : status = g_lock_store(rec, &lck, &self, NULL, 0);
634 0 : if (!NT_STATUS_IS_OK(status)) {
635 0 : DBG_DEBUG("g_lock_store() failed: %s\n",
636 : nt_errstr(status));
637 0 : return status;
638 : }
639 :
640 0 : return NT_STATUS_OK;
641 : }
642 :
643 1365 : static void g_lock_lock_fn(
644 : struct db_record *rec,
645 : TDB_DATA value,
646 : void *private_data)
647 : {
648 1365 : struct g_lock_lock_fn_state *state = private_data;
649 1365 : struct server_id blocker = {0};
650 :
651 : /*
652 :          * We're trying to get a lock, and if we are
653 :          * successful in doing that, we should not
654 :          * wake up any other waiters; all they would
655 :          * find is that we're holding a lock they
656 :          * are conflicting with.
657 : */
658 1365 : dbwrap_watched_watch_skip_alerting(rec);
659 :
660 1365 : state->status = g_lock_trylock(rec, state, value, &blocker);
661 1365 : if (!NT_STATUS_IS_OK(state->status)) {
662 464 : DBG_DEBUG("g_lock_trylock returned %s\n",
663 : nt_errstr(state->status));
664 : }
665 1365 : if (!NT_STATUS_EQUAL(state->status, NT_STATUS_LOCK_NOT_GRANTED)) {
666 901 : return;
667 : }
668 :
669 1392 : state->watch_req = dbwrap_watched_watch_send(
670 928 : state->req_state, state->req_state->ev, rec, state->watch_instance, blocker);
671 464 : if (state->watch_req == NULL) {
672 0 : state->status = NT_STATUS_NO_MEMORY;
673 : }
674 : }
675 :
676 0 : static int g_lock_lock_state_destructor(struct g_lock_lock_state *s)
677 : {
678 0 : NTSTATUS status = g_lock_unlock(s->ctx, s->key);
679 0 : if (!NT_STATUS_IS_OK(status)) {
680 0 : DBG_DEBUG("g_lock_unlock failed: %s\n", nt_errstr(status));
681 : }
682 0 : return 0;
683 : }
684 :
685 : static void g_lock_lock_retry(struct tevent_req *subreq);
686 :
687 901 : struct tevent_req *g_lock_lock_send(TALLOC_CTX *mem_ctx,
688 : struct tevent_context *ev,
689 : struct g_lock_ctx *ctx,
690 : TDB_DATA key,
691 : enum g_lock_type type)
692 : {
693 : struct tevent_req *req;
694 : struct g_lock_lock_state *state;
695 : struct g_lock_lock_fn_state fn_state;
696 : NTSTATUS status;
697 : bool ok;
698 :
699 901 : req = tevent_req_create(mem_ctx, &state, struct g_lock_lock_state);
700 901 : if (req == NULL) {
701 0 : return NULL;
702 : }
703 901 : state->ev = ev;
704 901 : state->ctx = ctx;
705 901 : state->key = key;
706 901 : state->type = type;
707 :
708 901 : fn_state = (struct g_lock_lock_fn_state) {
709 : .req_state = state,
710 : };
711 :
712 901 : status = dbwrap_do_locked(ctx->db, key, g_lock_lock_fn, &fn_state);
713 901 : if (tevent_req_nterror(req, status)) {
714 0 : DBG_DEBUG("dbwrap_do_locked failed: %s\n",
715 : nt_errstr(status));
716 0 : return tevent_req_post(req, ev);
717 : }
718 :
719 901 : if (NT_STATUS_IS_OK(fn_state.status)) {
720 494 : tevent_req_done(req);
721 494 : return tevent_req_post(req, ev);
722 : }
723 407 : if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
724 0 : tevent_req_nterror(req, fn_state.status);
725 0 : return tevent_req_post(req, ev);
726 : }
727 :
728 407 : if (tevent_req_nomem(fn_state.watch_req, req)) {
729 0 : return tevent_req_post(req, ev);
730 : }
731 :
732 814 : ok = tevent_req_set_endtime(
733 : fn_state.watch_req,
734 407 : state->ev,
735 407 : timeval_current_ofs(5 + generate_random() % 5, 0));
736 407 : if (!ok) {
737 0 : tevent_req_oom(req);
738 0 : return tevent_req_post(req, ev);
739 : }
740 407 : tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);
741 :
742 407 : return req;
743 : }
744 :
745 464 : static void g_lock_lock_retry(struct tevent_req *subreq)
746 : {
747 464 : struct tevent_req *req = tevent_req_callback_data(
748 : subreq, struct tevent_req);
749 464 : struct g_lock_lock_state *state = tevent_req_data(
750 : req, struct g_lock_lock_state);
751 : struct g_lock_lock_fn_state fn_state;
752 464 : struct server_id blocker = { .pid = 0 };
753 464 : bool blockerdead = false;
754 : NTSTATUS status;
755 464 : uint64_t instance = 0;
756 :
757 464 : status = dbwrap_watched_watch_recv(subreq, &instance, &blockerdead, &blocker);
758 464 : DBG_DEBUG("watch_recv returned %s\n", nt_errstr(status));
759 464 : TALLOC_FREE(subreq);
760 :
761 464 : if (!NT_STATUS_IS_OK(status) &&
762 0 : !NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
763 0 : tevent_req_nterror(req, status);
764 0 : return;
765 : }
766 :
767 464 : state->retry = true;
768 :
769 464 : fn_state = (struct g_lock_lock_fn_state) {
770 : .req_state = state,
771 464 : .dead_blocker = blockerdead ? &blocker : NULL,
772 : .watch_instance = instance,
773 : };
774 :
775 464 : status = dbwrap_do_locked(state->ctx->db, state->key,
776 : g_lock_lock_fn, &fn_state);
777 464 : if (tevent_req_nterror(req, status)) {
778 0 : DBG_DEBUG("dbwrap_do_locked failed: %s\n",
779 : nt_errstr(status));
780 0 : return;
781 : }
782 :
783 464 : if (NT_STATUS_IS_OK(fn_state.status)) {
784 407 : tevent_req_done(req);
785 407 : return;
786 : }
787 57 : if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
788 0 : tevent_req_nterror(req, fn_state.status);
789 0 : return;
790 : }
791 :
792 57 : if (tevent_req_nomem(fn_state.watch_req, req)) {
793 0 : return;
794 : }
795 :
796 57 : if (!tevent_req_set_endtime(
797 : fn_state.watch_req, state->ev,
798 57 : timeval_current_ofs(5 + generate_random() % 5, 0))) {
799 0 : return;
800 : }
801 57 : tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);
802 : }
803 :
804 901 : NTSTATUS g_lock_lock_recv(struct tevent_req *req)
805 : {
806 901 : struct g_lock_lock_state *state = tevent_req_data(
807 : req, struct g_lock_lock_state);
808 901 : struct g_lock_ctx *ctx = state->ctx;
809 : NTSTATUS status;
810 :
811 901 : if (tevent_req_is_nterror(req, &status)) {
812 0 : return status;
813 : }
814 :
815 1683 : if ((ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) &&
816 1564 : ((state->type == G_LOCK_READ) ||
817 782 : (state->type == G_LOCK_WRITE))) {
818 782 : const char *name = dbwrap_name(ctx->db);
819 782 : dbwrap_lock_order_lock(name, ctx->lock_order);
820 : }
821 :
822 901 : return NT_STATUS_OK;
823 : }
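/*
 * Async usage sketch (hedged; assumes an existing tevent loop and a
 * hypothetical completion callback):
 *
 *   subreq = g_lock_lock_send(mem_ctx, ev, ctx, key, G_LOCK_WRITE);
 *   if (subreq == NULL) { ... }
 *   tevent_req_set_callback(subreq, my_got_lock_fn, state);
 *
 *   static void my_got_lock_fn(struct tevent_req *subreq)
 *   {
 *           NTSTATUS status = g_lock_lock_recv(subreq);
 *           TALLOC_FREE(subreq);
 *           ...
 *   }
 */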
824 :
825 : struct g_lock_lock_simple_state {
826 : struct server_id me;
827 : enum g_lock_type type;
828 : NTSTATUS status;
829 : };
830 :
831 25886 : static void g_lock_lock_simple_fn(
832 : struct db_record *rec,
833 : TDB_DATA value,
834 : void *private_data)
835 : {
836 25886 : struct g_lock_lock_simple_state *state = private_data;
837 : struct server_id_buf buf;
838 25886 : struct g_lock lck = { .exclusive.pid = 0 };
839 : bool ok;
840 :
841 : /*
842 :          * We're trying to get a lock, and if we are
843 :          * successful in doing that, we should not
844 :          * wake up any other waiters; all they would
845 :          * find is that we're holding a lock they
846 :          * are conflicting with.
847 : */
848 25886 : dbwrap_watched_watch_skip_alerting(rec);
849 :
850 25886 : ok = g_lock_parse(value.dptr, value.dsize, &lck);
851 25886 : if (!ok) {
852 0 : DBG_DEBUG("g_lock_parse failed\n");
853 0 : state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
854 22595 : return;
855 : }
856 :
857 25886 : if (lck.exclusive.pid != 0) {
858 782 : DBG_DEBUG("locked by %s\n",
859 : server_id_str_buf(lck.exclusive, &buf));
860 782 : goto not_granted;
861 : }
862 :
863 25104 : lck.unique_lock_epoch = generate_unique_u64(lck.unique_lock_epoch);
864 :
865 25104 : if (state->type == G_LOCK_WRITE) {
866 25104 : if (lck.num_shared != 0) {
867 0 : DBG_DEBUG("num_shared=%zu\n", lck.num_shared);
868 0 : goto not_granted;
869 : }
870 25104 : lck.exclusive = state->me;
871 25104 : state->status = g_lock_store(rec, &lck, NULL, NULL, 0);
872 25104 : return;
873 : }
874 :
875 0 : if (state->type == G_LOCK_READ) {
876 0 : g_lock_cleanup_shared(&lck);
877 0 : state->status = g_lock_store(rec, &lck, &state->me, NULL, 0);
878 0 : return;
879 : }
880 :
881 0 : not_granted:
882 782 : state->status = NT_STATUS_LOCK_NOT_GRANTED;
883 : }
884 :
885 25886 : NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, TDB_DATA key,
886 : enum g_lock_type type, struct timeval timeout)
887 : {
888 : TALLOC_CTX *frame;
889 : struct tevent_context *ev;
890 : struct tevent_req *req;
891 : struct timeval end;
892 : NTSTATUS status;
893 :
894 25886 : if ((type == G_LOCK_READ) || (type == G_LOCK_WRITE)) {
895 : /*
896 : * This is an abstraction violation: Normally we do
897 : * the sync wrappers around async functions with full
898 : * nested event contexts. However, this is used in
899 : * very hot code paths, so avoid the event context
900 : * creation for the good path where there's no lock
901 : * contention. My benchmark gave a factor of 2
902 : * improvement for lock/unlock.
903 : */
904 51772 : struct g_lock_lock_simple_state state = {
905 25886 : .me = messaging_server_id(ctx->msg),
906 : .type = type,
907 : };
908 25886 : status = dbwrap_do_locked(
909 : ctx->db, key, g_lock_lock_simple_fn, &state);
910 25886 : if (!NT_STATUS_IS_OK(status)) {
911 0 : DBG_DEBUG("dbwrap_do_locked() failed: %s\n",
912 : nt_errstr(status));
913 25104 : return status;
914 : }
915 :
916 25886 : DBG_DEBUG("status=%s, state.status=%s\n",
917 : nt_errstr(status),
918 : nt_errstr(state.status));
919 :
920 25886 : if (NT_STATUS_IS_OK(state.status)) {
921 25104 : if (ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) {
922 25104 : const char *name = dbwrap_name(ctx->db);
923 25104 : dbwrap_lock_order_lock(name, ctx->lock_order);
924 : }
925 25104 : return NT_STATUS_OK;
926 : }
927 782 : if (!NT_STATUS_EQUAL(
928 : state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
929 0 : return state.status;
930 : }
931 :
932 : /*
933 :          * Fall back to the full g_lock_trylock logic;
934 :          * g_lock_lock_simple_fn() called above covers only
935 :          * the uncontended path.
936 : */
937 : }
938 :
939 782 : frame = talloc_stackframe();
940 782 : status = NT_STATUS_NO_MEMORY;
941 :
942 782 : ev = samba_tevent_context_init(frame);
943 782 : if (ev == NULL) {
944 0 : goto fail;
945 : }
946 782 : req = g_lock_lock_send(frame, ev, ctx, key, type);
947 782 : if (req == NULL) {
948 0 : goto fail;
949 : }
950 782 : end = timeval_current_ofs(timeout.tv_sec, timeout.tv_usec);
951 782 : if (!tevent_req_set_endtime(req, ev, end)) {
952 0 : goto fail;
953 : }
954 782 : if (!tevent_req_poll_ntstatus(req, ev, &status)) {
955 0 : goto fail;
956 : }
957 782 : status = g_lock_lock_recv(req);
958 782 : fail:
959 782 : TALLOC_FREE(frame);
960 782 : return status;
961 : }
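/*
 * Sketch of the synchronous wrapper in use, assuming helpers like
 * string_term_tdb_data() and timeval_set() from lib/util (hedged,
 * hypothetical key name, error handling trimmed):
 *
 *   TDB_DATA key = string_term_tdb_data("myapp_serialize");
 *   NTSTATUS status = g_lock_lock(ctx, key, G_LOCK_WRITE,
 *                                 timeval_set(10, 0));
 *   if (NT_STATUS_IS_OK(status)) {
 *           ... critical section ...
 *           g_lock_unlock(ctx, key);
 *   }
 */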
962 :
963 : struct g_lock_unlock_state {
964 : struct server_id self;
965 : NTSTATUS status;
966 : };
967 :
968 26005 : static void g_lock_unlock_fn(
969 : struct db_record *rec,
970 : TDB_DATA value,
971 : void *private_data)
972 : {
973 26005 : struct g_lock_unlock_state *state = private_data;
974 : struct server_id_buf tmp1, tmp2;
975 : struct g_lock lck;
976 : size_t i;
977 : bool ok, exclusive;
978 :
979 26005 : ok = g_lock_parse(value.dptr, value.dsize, &lck);
980 26005 : if (!ok) {
981 0 : DBG_DEBUG("g_lock_parse() failed\n");
982 0 : state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
983 7798 : return;
984 : }
985 :
986 26005 : exclusive = server_id_equal(&state->self, &lck.exclusive);
987 :
988 26005 : for (i=0; i<lck.num_shared; i++) {
989 : struct server_id shared;
990 2 : g_lock_get_shared(&lck, i, &shared);
991 2 : if (server_id_equal(&state->self, &shared)) {
992 2 : break;
993 : }
994 : }
995 :
996 26005 : if (i < lck.num_shared) {
997 2 : if (exclusive) {
998 0 : DBG_DEBUG("%s both exclusive and shared (%zu)\n",
999 : server_id_str_buf(state->self, &tmp1),
1000 : i);
1001 0 : state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
1002 0 : return;
1003 : }
1004 2 : g_lock_del_shared(&lck, i);
1005 : } else {
1006 26003 : if (!exclusive) {
1007 0 : DBG_DEBUG("Lock not found, self=%s, lck.exclusive=%s, "
1008 : "num_shared=%zu\n",
1009 : server_id_str_buf(state->self, &tmp1),
1010 : server_id_str_buf(lck.exclusive, &tmp2),
1011 : lck.num_shared);
1012 0 : state->status = NT_STATUS_NOT_FOUND;
1013 0 : return;
1014 : }
1015 26003 : lck.exclusive = (struct server_id) { .pid = 0 };
1016 : }
1017 :
1018 49466 : if ((lck.exclusive.pid == 0) &&
1019 49466 : (lck.num_shared == 0) &&
1020 26005 : (lck.datalen == 0)) {
1021 9019 : state->status = dbwrap_record_delete(rec);
1022 9019 : return;
1023 : }
1024 :
1025 16986 : if (!exclusive && lck.exclusive.pid != 0) {
1026 : /*
1027 : * We only had a read lock and there's
1028 : * someone waiting for an exclusive lock.
1029 : *
1030 : * Don't alert the exclusive lock waiter
1031 : * if there are still other read lock holders.
1032 : */
1033 0 : g_lock_cleanup_shared(&lck);
1034 0 : if (lck.num_shared != 0) {
1035 0 : dbwrap_watched_watch_skip_alerting(rec);
1036 : }
1037 : }
1038 :
1039 16986 : lck.unique_lock_epoch = generate_unique_u64(lck.unique_lock_epoch);
1040 :
1041 16986 : state->status = g_lock_store(rec, &lck, NULL, NULL, 0);
1042 : }
1043 :
1044 26005 : NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, TDB_DATA key)
1045 : {
1046 49466 : struct g_lock_unlock_state state = {
1047 26005 : .self = messaging_server_id(ctx->msg),
1048 : };
1049 : NTSTATUS status;
1050 :
1051 26005 : status = dbwrap_do_locked(ctx->db, key, g_lock_unlock_fn, &state);
1052 26005 : if (!NT_STATUS_IS_OK(status)) {
1053 0 : DBG_WARNING("dbwrap_do_locked failed: %s\n",
1054 : nt_errstr(status));
1055 0 : return status;
1056 : }
1057 26005 : if (!NT_STATUS_IS_OK(state.status)) {
1058 0 : DBG_WARNING("g_lock_unlock_fn failed: %s\n",
1059 : nt_errstr(state.status));
1060 0 : return state.status;
1061 : }
1062 :
1063 26005 : if (ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) {
1064 25886 : const char *name = dbwrap_name(ctx->db);
1065 25886 : dbwrap_lock_order_unlock(name, ctx->lock_order);
1066 : }
1067 :
1068 26005 : return NT_STATUS_OK;
1069 : }
1070 :
1071 : struct g_lock_writev_data_state {
1072 : TDB_DATA key;
1073 : struct server_id self;
1074 : const TDB_DATA *dbufs;
1075 : size_t num_dbufs;
1076 : NTSTATUS status;
1077 : };
1078 :
1079 43336 : static void g_lock_writev_data_fn(
1080 : struct db_record *rec,
1081 : TDB_DATA value,
1082 : void *private_data)
1083 : {
1084 43336 : struct g_lock_writev_data_state *state = private_data;
1085 : struct g_lock lck;
1086 : bool exclusive;
1087 : bool ok;
1088 :
1089 : /*
1090 :          * We're holding an exclusive write lock.
1091 :          *
1092 :          * Now we're updating the content of the record.
1093 :          *
1094 :          * We should not wake up any other waiters; all they
1095 :          * would find is that we're still holding a lock they
1096 :          * are conflicting with.
1097 : */
1098 43336 : dbwrap_watched_watch_skip_alerting(rec);
1099 :
1100 43336 : ok = g_lock_parse(value.dptr, value.dsize, &lck);
1101 43336 : if (!ok) {
1102 0 : DBG_DEBUG("g_lock_parse for %s failed\n",
1103 : hex_encode_talloc(talloc_tos(),
1104 : state->key.dptr,
1105 : state->key.dsize));
1106 0 : state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
1107 0 : return;
1108 : }
1109 :
1110 43336 : exclusive = server_id_equal(&state->self, &lck.exclusive);
1111 :
1112 : /*
1113 :          * Make sure we're really exclusive: we are already marked
1114 :          * as exclusive while still waiting for an exclusive lock.
1115 : */
1116 43336 : exclusive &= (lck.num_shared == 0);
1117 :
1118 43336 : if (!exclusive) {
1119 : struct server_id_buf buf1, buf2;
1120 0 : DBG_DEBUG("Not locked by us: self=%s, lck.exclusive=%s, "
1121 : "lck.num_shared=%zu\n",
1122 : server_id_str_buf(state->self, &buf1),
1123 : server_id_str_buf(lck.exclusive, &buf2),
1124 : lck.num_shared);
1125 0 : state->status = NT_STATUS_NOT_LOCKED;
1126 0 : return;
1127 : }
1128 :
1129 43336 : lck.unique_data_epoch = generate_unique_u64(lck.unique_data_epoch);
1130 43336 : lck.data = NULL;
1131 43336 : lck.datalen = 0;
1132 43336 : state->status = g_lock_store(
1133 : rec, &lck, NULL, state->dbufs, state->num_dbufs);
1134 : }
1135 :
1136 43336 : NTSTATUS g_lock_writev_data(
1137 : struct g_lock_ctx *ctx,
1138 : TDB_DATA key,
1139 : const TDB_DATA *dbufs,
1140 : size_t num_dbufs)
1141 : {
1142 86672 : struct g_lock_writev_data_state state = {
1143 : .key = key,
1144 43336 : .self = messaging_server_id(ctx->msg),
1145 : .dbufs = dbufs,
1146 : .num_dbufs = num_dbufs,
1147 : };
1148 : NTSTATUS status;
1149 :
1150 43336 : status = dbwrap_do_locked(
1151 : ctx->db, key, g_lock_writev_data_fn, &state);
1152 43336 : if (!NT_STATUS_IS_OK(status)) {
1153 0 : DBG_WARNING("dbwrap_do_locked failed: %s\n",
1154 : nt_errstr(status));
1155 0 : return status;
1156 : }
1157 43336 : if (!NT_STATUS_IS_OK(state.status)) {
1158 0 : DBG_WARNING("g_lock_writev_data_fn failed: %s\n",
1159 : nt_errstr(state.status));
1160 0 : return state.status;
1161 : }
1162 :
1163 43336 : return NT_STATUS_OK;
1164 : }
1165 :
1166 8899 : NTSTATUS g_lock_write_data(struct g_lock_ctx *ctx, TDB_DATA key,
1167 : const uint8_t *buf, size_t buflen)
1168 : {
1169 8899 : TDB_DATA dbuf = {
1170 : .dptr = discard_const_p(uint8_t, buf),
1171 : .dsize = buflen,
1172 : };
1173 8899 : return g_lock_writev_data(ctx, key, &dbuf, 1);
1174 : }
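/*
 * Sketch: updating the payload while holding the exclusive lock.
 * g_lock_writev_data() refuses with NT_STATUS_NOT_LOCKED when the
 * caller is not the sole exclusive holder ("blob" is a hypothetical
 * uint8_t array, error handling trimmed):
 *
 *   status = g_lock_lock(ctx, key, G_LOCK_WRITE, timeval_set(1, 0));
 *   if (NT_STATUS_IS_OK(status)) {
 *           status = g_lock_write_data(ctx, key, blob, sizeof(blob));
 *           g_lock_unlock(ctx, key);
 *   }
 */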
1175 :
1176 : struct g_lock_locks_state {
1177 : int (*fn)(TDB_DATA key, void *private_data);
1178 : void *private_data;
1179 : };
1180 :
1181 132 : static int g_lock_locks_fn(struct db_record *rec, void *priv)
1182 : {
1183 : TDB_DATA key;
1184 132 : struct g_lock_locks_state *state = (struct g_lock_locks_state *)priv;
1185 :
1186 132 : key = dbwrap_record_get_key(rec);
1187 132 : return state->fn(key, state->private_data);
1188 : }
1189 :
1190 116 : int g_lock_locks(struct g_lock_ctx *ctx,
1191 : int (*fn)(TDB_DATA key, void *private_data),
1192 : void *private_data)
1193 : {
1194 : struct g_lock_locks_state state;
1195 : NTSTATUS status;
1196 : int count;
1197 :
1198 116 : state.fn = fn;
1199 116 : state.private_data = private_data;
1200 :
1201 116 : status = dbwrap_traverse_read(ctx->db, g_lock_locks_fn, &state, &count);
1202 116 : if (!NT_STATUS_IS_OK(status)) {
1203 0 : return -1;
1204 : }
1205 116 : return count;
1206 : }
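/*
 * Traversal sketch with a hypothetical callback; g_lock_locks()
 * returns the record count, or -1 if the traverse failed:
 *
 *   static int count_fn(TDB_DATA key, void *private_data)
 *   {
 *           size_t *count = private_data;
 *           *count += 1;
 *           return 0;
 *   }
 *
 *   size_t n = 0;
 *   int ret = g_lock_locks(ctx, count_fn, &n);
 */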
1207 :
1208 : struct g_lock_dump_state {
1209 : TALLOC_CTX *mem_ctx;
1210 : TDB_DATA key;
1211 : void (*fn)(struct server_id exclusive,
1212 : size_t num_shared,
1213 : const struct server_id *shared,
1214 : const uint8_t *data,
1215 : size_t datalen,
1216 : void *private_data);
1217 : void *private_data;
1218 : NTSTATUS status;
1219 : enum dbwrap_req_state req_state;
1220 : };
1221 :
1222 81812 : static void g_lock_dump_fn(TDB_DATA key, TDB_DATA data,
1223 : void *private_data)
1224 : {
1225 81812 : struct g_lock_dump_state *state = private_data;
1226 81812 : struct g_lock lck = (struct g_lock) { .exclusive.pid = 0 };
1227 81812 : struct server_id *shared = NULL;
1228 : size_t i;
1229 : bool ok;
1230 :
1231 81812 : ok = g_lock_parse(data.dptr, data.dsize, &lck);
1232 81812 : if (!ok) {
1233 0 : DBG_DEBUG("g_lock_parse failed for %s\n",
1234 : hex_encode_talloc(talloc_tos(),
1235 : state->key.dptr,
1236 : state->key.dsize));
1237 0 : state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
1238 0 : return;
1239 : }
1240 :
1241 81812 : if (lck.num_shared > 0) {
1242 0 : shared = talloc_array(
1243 : state->mem_ctx, struct server_id, lck.num_shared);
1244 0 : if (shared == NULL) {
1245 0 : DBG_DEBUG("talloc failed\n");
1246 0 : state->status = NT_STATUS_NO_MEMORY;
1247 0 : return;
1248 : }
1249 : }
1250 :
1251 81812 : for (i=0; i<lck.num_shared; i++) {
1252 0 : g_lock_get_shared(&lck, i, &shared[i]);
1253 : }
1254 :
1255 154294 : state->fn(lck.exclusive,
1256 : lck.num_shared,
1257 : shared,
1258 81812 : lck.data,
1259 : lck.datalen,
1260 : state->private_data);
1261 :
1262 81812 : TALLOC_FREE(shared);
1263 :
1264 81812 : state->status = NT_STATUS_OK;
1265 : }
1266 :
1267 82749 : NTSTATUS g_lock_dump(struct g_lock_ctx *ctx, TDB_DATA key,
1268 : void (*fn)(struct server_id exclusive,
1269 : size_t num_shared,
1270 : const struct server_id *shared,
1271 : const uint8_t *data,
1272 : size_t datalen,
1273 : void *private_data),
1274 : void *private_data)
1275 : {
1276 82749 : struct g_lock_dump_state state = {
1277 : .mem_ctx = ctx, .key = key,
1278 : .fn = fn, .private_data = private_data
1279 : };
1280 : NTSTATUS status;
1281 :
1282 82749 : status = dbwrap_parse_record(ctx->db, key, g_lock_dump_fn, &state);
1283 82749 : if (!NT_STATUS_IS_OK(status)) {
1284 937 : DBG_DEBUG("dbwrap_parse_record returned %s\n",
1285 : nt_errstr(status));
1286 937 : return status;
1287 : }
1288 81812 : if (!NT_STATUS_IS_OK(state.status)) {
1289 0 : DBG_DEBUG("g_lock_dump_fn returned %s\n",
1290 : nt_errstr(state.status));
1291 0 : return state.status;
1292 : }
1293 81812 : return NT_STATUS_OK;
1294 : }
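/*
 * Dump sketch with a hypothetical callback that prints the current
 * exclusive holder (hedged):
 *
 *   static void print_fn(struct server_id exclusive,
 *                        size_t num_shared,
 *                        const struct server_id *shared,
 *                        const uint8_t *data,
 *                        size_t datalen,
 *                        void *private_data)
 *   {
 *           struct server_id_buf buf;
 *           if (exclusive.pid != 0) {
 *                   DBG_NOTICE("exclusive holder: %s\n",
 *                              server_id_str_buf(exclusive, &buf));
 *           }
 *   }
 *
 *   status = g_lock_dump(ctx, key, print_fn, NULL);
 */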
1295 :
1296 : static void g_lock_dump_done(struct tevent_req *subreq);
1297 :
1298 0 : struct tevent_req *g_lock_dump_send(
1299 : TALLOC_CTX *mem_ctx,
1300 : struct tevent_context *ev,
1301 : struct g_lock_ctx *ctx,
1302 : TDB_DATA key,
1303 : void (*fn)(struct server_id exclusive,
1304 : size_t num_shared,
1305 : const struct server_id *shared,
1306 : const uint8_t *data,
1307 : size_t datalen,
1308 : void *private_data),
1309 : void *private_data)
1310 : {
1311 0 : struct tevent_req *req = NULL, *subreq = NULL;
1312 0 : struct g_lock_dump_state *state = NULL;
1313 :
1314 0 : req = tevent_req_create(mem_ctx, &state, struct g_lock_dump_state);
1315 0 : if (req == NULL) {
1316 0 : return NULL;
1317 : }
1318 0 : state->mem_ctx = state;
1319 0 : state->key = key;
1320 0 : state->fn = fn;
1321 0 : state->private_data = private_data;
1322 :
1323 0 : subreq = dbwrap_parse_record_send(
1324 : state,
1325 : ev,
1326 : ctx->db,
1327 : key,
1328 : g_lock_dump_fn,
1329 : state,
1330 0 : &state->req_state);
1331 0 : if (tevent_req_nomem(subreq, req)) {
1332 0 : return tevent_req_post(req, ev);
1333 : }
1334 0 : tevent_req_set_callback(subreq, g_lock_dump_done, req);
1335 0 : return req;
1336 : }
1337 :
1338 0 : static void g_lock_dump_done(struct tevent_req *subreq)
1339 : {
1340 0 : struct tevent_req *req = tevent_req_callback_data(
1341 : subreq, struct tevent_req);
1342 0 : struct g_lock_dump_state *state = tevent_req_data(
1343 : req, struct g_lock_dump_state);
1344 : NTSTATUS status;
1345 :
1346 0 : status = dbwrap_parse_record_recv(subreq);
1347 0 : TALLOC_FREE(subreq);
1348 0 : if (tevent_req_nterror(req, status) ||
1349 0 : tevent_req_nterror(req, state->status)) {
1350 0 : return;
1351 : }
1352 0 : tevent_req_done(req);
1353 : }
1354 :
1355 0 : NTSTATUS g_lock_dump_recv(struct tevent_req *req)
1356 : {
1357 0 : return tevent_req_simple_recv_ntstatus(req);
1358 : }
1359 :
1360 467 : int g_lock_seqnum(struct g_lock_ctx *ctx)
1361 : {
1362 467 : return dbwrap_get_seqnum(ctx->db);
1363 : }
1364 :
1365 : struct g_lock_watch_data_state {
1366 : struct tevent_context *ev;
1367 : struct g_lock_ctx *ctx;
1368 : TDB_DATA key;
1369 : struct server_id blocker;
1370 : bool blockerdead;
1371 : uint64_t unique_lock_epoch;
1372 : uint64_t unique_data_epoch;
1373 : uint64_t watch_instance;
1374 : NTSTATUS status;
1375 : };
1376 :
1377 : static void g_lock_watch_data_done(struct tevent_req *subreq);
1378 :
1379 0 : static void g_lock_watch_data_send_fn(
1380 : struct db_record *rec,
1381 : TDB_DATA value,
1382 : void *private_data)
1383 : {
1384 0 : struct tevent_req *req = talloc_get_type_abort(
1385 : private_data, struct tevent_req);
1386 0 : struct g_lock_watch_data_state *state = tevent_req_data(
1387 : req, struct g_lock_watch_data_state);
1388 0 : struct tevent_req *subreq = NULL;
1389 : struct g_lock lck;
1390 : bool ok;
1391 :
1392 0 : ok = g_lock_parse(value.dptr, value.dsize, &lck);
1393 0 : if (!ok) {
1394 0 : state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
1395 0 : return;
1396 : }
1397 0 : state->unique_lock_epoch = lck.unique_lock_epoch;
1398 0 : state->unique_data_epoch = lck.unique_data_epoch;
1399 :
1400 0 : DBG_DEBUG("state->unique_data_epoch=%"PRIu64"\n", state->unique_data_epoch);
1401 :
1402 0 : subreq = dbwrap_watched_watch_send(
1403 : state, state->ev, rec, 0, state->blocker);
1404 0 : if (subreq == NULL) {
1405 0 : state->status = NT_STATUS_NO_MEMORY;
1406 0 : return;
1407 : }
1408 0 : tevent_req_set_callback(subreq, g_lock_watch_data_done, req);
1409 :
1410 0 : state->status = NT_STATUS_EVENT_PENDING;
1411 : }
1412 :
1413 0 : struct tevent_req *g_lock_watch_data_send(
1414 : TALLOC_CTX *mem_ctx,
1415 : struct tevent_context *ev,
1416 : struct g_lock_ctx *ctx,
1417 : TDB_DATA key,
1418 : struct server_id blocker)
1419 : {
1420 0 : struct tevent_req *req = NULL;
1421 0 : struct g_lock_watch_data_state *state = NULL;
1422 : NTSTATUS status;
1423 :
1424 0 : req = tevent_req_create(
1425 : mem_ctx, &state, struct g_lock_watch_data_state);
1426 0 : if (req == NULL) {
1427 0 : return NULL;
1428 : }
1429 0 : state->ev = ev;
1430 0 : state->ctx = ctx;
1431 0 : state->blocker = blocker;
1432 :
1433 0 : state->key = tdb_data_talloc_copy(state, key);
1434 0 : if (tevent_req_nomem(state->key.dptr, req)) {
1435 0 : return tevent_req_post(req, ev);
1436 : }
1437 :
1438 0 : status = dbwrap_do_locked(
1439 : ctx->db, key, g_lock_watch_data_send_fn, req);
1440 0 : if (tevent_req_nterror(req, status)) {
1441 0 : DBG_DEBUG("dbwrap_do_locked returned %s\n", nt_errstr(status));
1442 0 : return tevent_req_post(req, ev);
1443 : }
1444 :
1445 0 : if (NT_STATUS_IS_OK(state->status)) {
1446 0 : tevent_req_done(req);
1447 0 : return tevent_req_post(req, ev);
1448 : }
1449 :
1450 0 : return req;
1451 : }
1452 :
1453 0 : static void g_lock_watch_data_done_fn(
1454 : struct db_record *rec,
1455 : TDB_DATA value,
1456 : void *private_data)
1457 : {
1458 0 : struct tevent_req *req = talloc_get_type_abort(
1459 : private_data, struct tevent_req);
1460 0 : struct g_lock_watch_data_state *state = tevent_req_data(
1461 : req, struct g_lock_watch_data_state);
1462 0 : struct tevent_req *subreq = NULL;
1463 : struct g_lock lck;
1464 : bool ok;
1465 :
1466 0 : ok = g_lock_parse(value.dptr, value.dsize, &lck);
1467 0 : if (!ok) {
1468 0 : dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
1469 0 : state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
1470 0 : return;
1471 : }
1472 :
1473 0 : if (lck.unique_data_epoch != state->unique_data_epoch) {
1474 0 : dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
1475 0 : DBG_DEBUG("lck.unique_data_epoch=%"PRIu64", "
1476 : "state->unique_data_epoch=%"PRIu64"\n",
1477 : lck.unique_data_epoch,
1478 : state->unique_data_epoch);
1479 0 : state->status = NT_STATUS_OK;
1480 0 : return;
1481 : }
1482 :
1483 : /*
1484 :          * If the lock epoch changed, we'd better
1485 :          * remove ourselves from the waiter list
1486 :          * (most likely the first position)
1487 :          * and re-add ourselves at the end of the list.
1488 :          *
1489 :          * This gives other lock waiters a chance
1490 :          * to make progress.
1491 :          *
1492 :          * Otherwise we keep our waiter instance alive and
1493 :          * keep waiting (most likely at the first position).
1494 : */
1495 0 : if (lck.unique_lock_epoch != state->unique_lock_epoch) {
1496 0 : dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
1497 0 : state->watch_instance = dbwrap_watched_watch_add_instance(rec);
1498 0 : state->unique_lock_epoch = lck.unique_lock_epoch;
1499 : }
1500 :
1501 0 : subreq = dbwrap_watched_watch_send(
1502 : state, state->ev, rec, state->watch_instance, state->blocker);
1503 0 : if (subreq == NULL) {
1504 0 : dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
1505 0 : state->status = NT_STATUS_NO_MEMORY;
1506 0 : return;
1507 : }
1508 0 : tevent_req_set_callback(subreq, g_lock_watch_data_done, req);
1509 :
1510 0 : state->status = NT_STATUS_EVENT_PENDING;
1511 : }
1512 :
1513 0 : static void g_lock_watch_data_done(struct tevent_req *subreq)
1514 : {
1515 0 : struct tevent_req *req = tevent_req_callback_data(
1516 : subreq, struct tevent_req);
1517 0 : struct g_lock_watch_data_state *state = tevent_req_data(
1518 : req, struct g_lock_watch_data_state);
1519 : NTSTATUS status;
1520 0 : uint64_t instance = 0;
1521 :
1522 0 : status = dbwrap_watched_watch_recv(
1523 : subreq, &instance, &state->blockerdead, &state->blocker);
1524 0 : TALLOC_FREE(subreq);
1525 0 : if (tevent_req_nterror(req, status)) {
1526 0 : DBG_DEBUG("dbwrap_watched_watch_recv returned %s\n",
1527 : nt_errstr(status));
1528 0 : return;
1529 : }
1530 :
1531 0 : state->watch_instance = instance;
1532 :
1533 0 : status = dbwrap_do_locked(
1534 0 : state->ctx->db, state->key, g_lock_watch_data_done_fn, req);
1535 0 : if (tevent_req_nterror(req, status)) {
1536 0 : DBG_DEBUG("dbwrap_do_locked returned %s\n", nt_errstr(status));
1537 0 : return;
1538 : }
1539 0 : if (NT_STATUS_EQUAL(state->status, NT_STATUS_EVENT_PENDING)) {
1540 0 : return;
1541 : }
1542 0 : if (tevent_req_nterror(req, state->status)) {
1543 0 : return;
1544 : }
1545 0 : tevent_req_done(req);
1546 : }
1547 :
1548 0 : NTSTATUS g_lock_watch_data_recv(
1549 : struct tevent_req *req,
1550 : bool *blockerdead,
1551 : struct server_id *blocker)
1552 : {
1553 0 : struct g_lock_watch_data_state *state = tevent_req_data(
1554 : req, struct g_lock_watch_data_state);
1555 : NTSTATUS status;
1556 :
1557 0 : if (tevent_req_is_nterror(req, &status)) {
1558 0 : return status;
1559 : }
1560 0 : if (blockerdead != NULL) {
1561 0 : *blockerdead = state->blockerdead;
1562 : }
1563 0 : if (blocker != NULL) {
1564 0 : *blocker = state->blocker;
1565 : }
1566 :
1567 0 : return NT_STATUS_OK;
1568 : }
1569 :
1570 128 : static void g_lock_wake_watchers_fn(
1571 : struct db_record *rec,
1572 : TDB_DATA value,
1573 : void *private_data)
1574 : {
1575 128 : struct g_lock lck = { .exclusive.pid = 0 };
1576 : NTSTATUS status;
1577 : bool ok;
1578 :
1579 128 : ok = g_lock_parse(value.dptr, value.dsize, &lck);
1580 128 : if (!ok) {
1581 0 : DBG_WARNING("g_lock_parse failed\n");
1582 0 : return;
1583 : }
1584 :
1585 128 : lck.unique_data_epoch = generate_unique_u64(lck.unique_data_epoch);
1586 :
1587 128 : status = g_lock_store(rec, &lck, NULL, NULL, 0);
1588 128 : if (!NT_STATUS_IS_OK(status)) {
1589 0 : DBG_WARNING("g_lock_store failed: %s\n", nt_errstr(status));
1590 0 : return;
1591 : }
1592 : }
1593 :
1594 128 : void g_lock_wake_watchers(struct g_lock_ctx *ctx, TDB_DATA key)
1595 : {
1596 : NTSTATUS status;
1597 :
1598 128 : status = dbwrap_do_locked(ctx->db, key, g_lock_wake_watchers_fn, NULL);
1599 128 : if (!NT_STATUS_IS_OK(status)) {
1600 0 : DBG_DEBUG("dbwrap_do_locked returned %s\n",
1601 : nt_errstr(status));
1602 : }
1603 128 : }