/*
  Unix SMB/CIFS implementation.

  POSIX NTVFS backend - locking

  Copyright (C) Andrew Tridgell 2004

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 3 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "vfs_posix.h"
#include "system/time.h"
#include "../lib/util/dlinklist.h"
#include "messaging/messaging.h"

/*
  check if we can perform IO on a range that might be locked
*/
NTSTATUS pvfs_check_lock(struct pvfs_state *pvfs,
			 struct pvfs_file *f,
			 uint32_t smbpid,
			 uint64_t offset, uint64_t count,
			 enum brl_type rw)
{
	if (!(pvfs->flags & PVFS_FLAG_STRICT_LOCKING)) {
		return NT_STATUS_OK;
	}

	return brlock_locktest(pvfs->brl_context,
			       f->brl_handle,
			       smbpid,
			       offset, count, rw);
}
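
/*
  Illustrative sketch, not part of the original file: an IO handler in
  this backend would be expected to call pvfs_check_lock() before
  touching the file data, along these lines (the readx field names are
  assumptions here, not taken from this file):

    status = pvfs_check_lock(pvfs, f, req->smbpid,
			     rd->readx.in.offset,
			     rd->readx.in.maxcnt,
			     READ_LOCK);
    NT_STATUS_NOT_OK_RETURN(status);
*/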

/* this state structure holds information about a lock we are waiting on */
struct pvfs_pending_lock {
	struct pvfs_pending_lock *next, *prev;
	struct pvfs_state *pvfs;
	union smb_lock *lck;
	struct pvfs_file *f;
	struct ntvfs_request *req;
	int pending_lock;
	struct pvfs_wait *wait_handle;
	struct timeval end_time;
};
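
/*
  Added note (derived from the code below, not in the original file):
  a pvfs_pending_lock is allocated in pvfs_lock() for requests with a
  non-zero timeout.  If a lock cannot be granted immediately, it is
  linked onto f->pending_list and retried from
  pvfs_pending_lock_continue() when a MSG_BRL_RETRY message arrives or
  end_time expires; it is freed once the lock set finally succeeds or
  fails.
*/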

/*
  a secondary attempt to setup a lock has failed - back out
  the locks we did get and send an error
*/
static void pvfs_lock_async_failed(struct pvfs_state *pvfs,
				   struct ntvfs_request *req,
				   struct pvfs_file *f,
				   struct smb_lock_entry *locks,
				   int i,
				   NTSTATUS status)
{
	/* undo the locks we just did */
	for (i--;i>=0;i--) {
		brlock_unlock(pvfs->brl_context,
			      f->brl_handle,
			      locks[i].pid,
			      locks[i].offset,
			      locks[i].count);
		f->lock_count--;
	}
	req->async_states->status = status;
	req->async_states->send_fn(req);
}


/*
  called when we receive a pending lock notification. It means that
  either our lock timed out or someone else has unlocked an overlapping
  range, so we should try the lock again. Note that on timeout we
  do retry the lock, giving it a last chance.
*/
static void pvfs_pending_lock_continue(void *private_data, enum pvfs_wait_notice reason)
{
	struct pvfs_pending_lock *pending = talloc_get_type(private_data,
					    struct pvfs_pending_lock);
	struct pvfs_state *pvfs = pending->pvfs;
	struct pvfs_file *f = pending->f;
	struct ntvfs_request *req = pending->req;
	union smb_lock *lck = pending->lck;
	struct smb_lock_entry *locks;
	enum brl_type rw;
	NTSTATUS status;
	int i;
	bool timed_out;

	timed_out = (reason != PVFS_WAIT_EVENT);

	locks = lck->lockx.in.locks + lck->lockx.in.ulock_cnt;

	if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
		rw = READ_LOCK;
	} else {
		rw = WRITE_LOCK;
	}

	DLIST_REMOVE(f->pending_list, pending);

	/* we don't retry on a cancel */
	if (reason == PVFS_WAIT_CANCEL) {
		if (pvfs->ntvfs->ctx->protocol < PROTOCOL_SMB2_02) {
			status = NT_STATUS_FILE_LOCK_CONFLICT;
		} else {
			status = NT_STATUS_CANCELLED;
		}
	} else {
		/*
		 * passing the pending pointer here is important: it is
		 * what makes brlock_lock() return the correct
		 * FILE_LOCK_CONFLICT error code in the failure case
		 */
		status = brlock_lock(pvfs->brl_context,
				     f->brl_handle,
				     locks[pending->pending_lock].pid,
				     locks[pending->pending_lock].offset,
				     locks[pending->pending_lock].count,
				     rw, pending);
	}
	if (NT_STATUS_IS_OK(status)) {
		f->lock_count++;
		timed_out = false;
	}

	/* if we have failed and timed out, or succeeded, then we
	   don't need the pending lock any more */
	if (NT_STATUS_IS_OK(status) || timed_out) {
		NTSTATUS status2;
		status2 = brlock_remove_pending(pvfs->brl_context,
						f->brl_handle, pending);
		if (!NT_STATUS_IS_OK(status2)) {
			DEBUG(0,("pvfs_lock: failed to remove pending lock - %s\n", nt_errstr(status2)));
		}
		talloc_free(pending->wait_handle);
	}

	if (!NT_STATUS_IS_OK(status)) {
		if (timed_out) {
			/* no more chances */
			pvfs_lock_async_failed(pvfs, req, f, locks, pending->pending_lock, status);
			talloc_free(pending);
		} else {
			/* we can try again */
			DLIST_ADD(f->pending_list, pending);
		}
		return;
	}

	/* if we haven't timed out yet, then we can do more pending locks */
	if (rw == READ_LOCK) {
		rw = PENDING_READ_LOCK;
	} else {
		rw = PENDING_WRITE_LOCK;
	}

	/* we've now got the pending lock. try and get the rest, which might
	   lead to more pending locks */
	for (i=pending->pending_lock+1;i<lck->lockx.in.lock_cnt;i++) {
		if (pending) {
			pending->pending_lock = i;
		}

		status = brlock_lock(pvfs->brl_context,
				     f->brl_handle,
				     locks[i].pid,
				     locks[i].offset,
				     locks[i].count,
				     rw, pending);
		if (!NT_STATUS_IS_OK(status)) {
			if (pending) {
				/* a timed lock failed - setup a wait message to handle
				   the pending lock notification or a timeout */
				pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
									 pending->end_time,
									 pvfs_pending_lock_continue,
									 pending);
				if (pending->wait_handle == NULL) {
					pvfs_lock_async_failed(pvfs, req, f, locks, i, NT_STATUS_NO_MEMORY);
					talloc_free(pending);
				} else {
					talloc_steal(pending, pending->wait_handle);
					DLIST_ADD(f->pending_list, pending);
				}
				return;
			}
			pvfs_lock_async_failed(pvfs, req, f, locks, i, status);
			talloc_free(pending);
			return;
		}

		f->lock_count++;
	}

	/* we've managed to get all the locks. Tell the client */
	req->async_states->status = NT_STATUS_OK;
	req->async_states->send_fn(req);
	talloc_free(pending);
}
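
/*
  Added note (not in the original file): the retry mechanism above
  relies on brlock_lock() recording the pending lock in the brlock
  database when given a non-NULL pending pointer; when a conflicting
  lock is dropped, a MSG_BRL_RETRY message is delivered and
  pvfs_wait_message() invokes this continuation.
*/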


/*
  called when we close a file that might have locks
*/
void pvfs_lock_close(struct pvfs_state *pvfs, struct pvfs_file *f)
{
	struct pvfs_pending_lock *p, *next;

	if (f->lock_count || f->pending_list) {
		DEBUG(5,("pvfs_lock: removing %.0f locks on close\n",
			 (double)f->lock_count));
		brlock_close(f->pvfs->brl_context, f->brl_handle);
		f->lock_count = 0;
	}

	/* reply to all the pending lock requests, telling them the
	   lock failed */
	for (p=f->pending_list;p;p=next) {
		next = p->next;
		DLIST_REMOVE(f->pending_list, p);
		p->req->async_states->status = NT_STATUS_RANGE_NOT_LOCKED;
		p->req->async_states->send_fn(p->req);
	}
}


/*
  cancel a set of locks
*/
static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct ntvfs_request *req, union smb_lock *lck,
				 struct pvfs_file *f)
{
	struct pvfs_pending_lock *p;

	for (p=f->pending_list;p;p=p->next) {
		/* check if the lock request matches exactly - you can only cancel with exact matches */
		if (p->lck->lockx.in.ulock_cnt == lck->lockx.in.ulock_cnt &&
		    p->lck->lockx.in.lock_cnt == lck->lockx.in.lock_cnt &&
		    p->lck->lockx.in.file.ntvfs == lck->lockx.in.file.ntvfs &&
		    p->lck->lockx.in.mode == (lck->lockx.in.mode & ~LOCKING_ANDX_CANCEL_LOCK)) {
			int i;

			for (i=0;i<lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt;i++) {
				if (p->lck->lockx.in.locks[i].pid != lck->lockx.in.locks[i].pid ||
				    p->lck->lockx.in.locks[i].offset != lck->lockx.in.locks[i].offset ||
				    p->lck->lockx.in.locks[i].count != lck->lockx.in.locks[i].count) {
					break;
				}
			}
			/* every entry in both the unlock and lock vectors
			   must match for this to be the same request */
			if (i < lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt) continue;

			/* an exact match! we can cancel it, which is equivalent
			   to triggering the timeout early */
			pvfs_pending_lock_continue(p, PVFS_WAIT_TIMEOUT);
			return NT_STATUS_OK;
		}
	}

	return NT_STATUS_DOS(ERRDOS, ERRcancelviolation);
}
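
/*
  Added note (not in the original file): as the matching logic above
  shows, a client cancels a blocked LockingAndX request by re-sending
  the same lock vector with LOCKING_ANDX_CANCEL_LOCK added to the mode
  field; anything short of an exact match is refused with
  ERRcancelviolation.
*/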


/*
  lock or unlock a byte range
*/
NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
		   struct ntvfs_request *req, union smb_lock *lck)
{
	struct pvfs_state *pvfs = talloc_get_type(ntvfs->private_data,
				  struct pvfs_state);
	struct pvfs_file *f;
	struct smb_lock_entry *locks;
	int i;
	enum brl_type rw;
	struct pvfs_pending_lock *pending = NULL;
	NTSTATUS status;

	if (lck->generic.level != RAW_LOCK_GENERIC) {
		return ntvfs_map_lock(ntvfs, req, lck);
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_OPLOCK_RELEASE) {
		return pvfs_oplock_release(ntvfs, req, lck);
	}

	f = pvfs_find_fd(pvfs, req, lck->lockx.in.file.ntvfs);
	if (!f) {
		return NT_STATUS_INVALID_HANDLE;
	}

	if (f->handle->fd == -1) {
		return NT_STATUS_FILE_IS_A_DIRECTORY;
	}

	status = pvfs_break_level2_oplocks(f);
	NT_STATUS_NOT_OK_RETURN(status);

	if (lck->lockx.in.timeout != 0 &&
	    (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		pending = talloc(f, struct pvfs_pending_lock);
		if (pending == NULL) {
			return NT_STATUS_NO_MEMORY;
		}

		pending->pvfs = pvfs;
		pending->lck = lck;
		pending->f = f;
		pending->req = req;

		pending->end_time =
			timeval_current_ofs_msec(lck->lockx.in.timeout);
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
		rw = pending? PENDING_READ_LOCK : READ_LOCK;
	} else {
		rw = pending? PENDING_WRITE_LOCK : WRITE_LOCK;
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_CANCEL_LOCK) {
		talloc_free(pending);
		return pvfs_lock_cancel(pvfs, req, lck, f);
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_CHANGE_LOCKTYPE) {
		/* this seems to not be supported by any windows server,
		   or used by any clients */
		talloc_free(pending);
		return NT_STATUS_DOS(ERRDOS, ERRnoatomiclocks);
	}

	/* the unlocks happen first */
	locks = lck->lockx.in.locks;

	for (i=0;i<lck->lockx.in.ulock_cnt;i++) {
		status = brlock_unlock(pvfs->brl_context,
				       f->brl_handle,
				       locks[i].pid,
				       locks[i].offset,
				       locks[i].count);
		if (!NT_STATUS_IS_OK(status)) {
			talloc_free(pending);
			return status;
		}
		f->lock_count--;
	}

	locks += i;

	for (i=0;i<lck->lockx.in.lock_cnt;i++) {
		if (pending) {
			pending->pending_lock = i;
		}

		status = brlock_lock(pvfs->brl_context,
				     f->brl_handle,
				     locks[i].pid,
				     locks[i].offset,
				     locks[i].count,
				     rw, pending);
		if (!NT_STATUS_IS_OK(status)) {
			if (pending) {
				/* a timed lock failed - setup a wait message to handle
				   the pending lock notification or a timeout */
				pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
									 pending->end_time,
									 pvfs_pending_lock_continue,
									 pending);
				if (pending->wait_handle == NULL) {
					talloc_free(pending);
					return NT_STATUS_NO_MEMORY;
				}
				talloc_steal(pending, pending->wait_handle);
				DLIST_ADD(f->pending_list, pending);
				return NT_STATUS_OK;
			}

			/* undo the locks we just did */
			for (i--;i>=0;i--) {
				brlock_unlock(pvfs->brl_context,
					      f->brl_handle,
					      locks[i].pid,
					      locks[i].offset,
					      locks[i].count);
				f->lock_count--;
			}
			talloc_free(pending);
			return status;
		}
		f->lock_count++;
	}

	talloc_free(pending);
	return NT_STATUS_OK;
}