root/source4/ntvfs/posix/pvfs_lock.c


DEFINITIONS

This source file includes the following definitions.
  1. pvfs_check_lock
  2. pvfs_lock_async_failed
  3. pvfs_pending_lock_continue
  4. pvfs_lock_close
  5. pvfs_lock_cancel
  6. pvfs_lock

/*
   Unix SMB/CIFS implementation.

   POSIX NTVFS backend - locking

   Copyright (C) Andrew Tridgell 2004

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "vfs_posix.h"
#include "system/time.h"
#include "../lib/util/dlinklist.h"
#include "messaging/messaging.h"


/*
  check if we can perform IO on a range that might be locked
*/
NTSTATUS pvfs_check_lock(struct pvfs_state *pvfs,
                         struct pvfs_file *f,
                         uint32_t smbpid,
                         uint64_t offset, uint64_t count,
                         enum brl_type rw)
{
        if (!(pvfs->flags & PVFS_FLAG_STRICT_LOCKING)) {
                return NT_STATUS_OK;
        }

        return brl_locktest(pvfs->brl_context,
                            f->brl_handle,
                            smbpid,
                            offset, count, rw);
}

/* this state structure holds information about a lock we are waiting on */
struct pvfs_pending_lock {
        struct pvfs_pending_lock *next, *prev;
        struct pvfs_state *pvfs;
        union smb_lock *lck;
        struct pvfs_file *f;
        struct ntvfs_request *req;
        int pending_lock;               /* index (within the lock entries) of the lock we are waiting on */
        struct pvfs_wait *wait_handle;  /* handle from pvfs_wait_message() for the retry/timeout wakeup */
        struct timeval end_time;        /* absolute time at which the lock request times out */
};

/*
  a secondary attempt to set up a lock has failed - back out
  the locks we did get and send an error
*/
static void pvfs_lock_async_failed(struct pvfs_state *pvfs,
                                   struct ntvfs_request *req,
                                   struct pvfs_file *f,
                                   struct smb_lock_entry *locks,
                                   int i,
                                   NTSTATUS status)
{
        /* undo the locks we just did */
        for (i--;i>=0;i--) {
                brl_unlock(pvfs->brl_context,
                           f->brl_handle,
                           locks[i].pid,
                           locks[i].offset,
                           locks[i].count);
                f->lock_count--;
        }
        req->async_states->status = status;
        req->async_states->send_fn(req);
}


/*
  called when we receive a pending lock notification. It means that
  either our lock timed out or someone else has unlocked an overlapping
  range, so we should try the lock again. Note that on timeout we
  do retry the lock, giving it a last chance.
*/
static void pvfs_pending_lock_continue(void *private_data, enum pvfs_wait_notice reason)
{
        struct pvfs_pending_lock *pending = talloc_get_type(private_data,
                                            struct pvfs_pending_lock);
        struct pvfs_state *pvfs = pending->pvfs;
        struct pvfs_file *f = pending->f;
        struct ntvfs_request *req = pending->req;
        union smb_lock *lck = pending->lck;
        struct smb_lock_entry *locks;
        enum brl_type rw;
        NTSTATUS status;
        int i;
        bool timed_out;

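        /* only a real event notification counts as a retry opportunity;
           any other wakeup reason (e.g. timeout or cancel) is treated as
           having timed out */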
        timed_out = (reason != PVFS_WAIT_EVENT);

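        /* the lock entries follow the unlock entries in the locks array;
           the unlocks were already handled in pvfs_lock(), so skip past them */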
        locks = lck->lockx.in.locks + lck->lockx.in.ulock_cnt;

        if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
                rw = READ_LOCK;
        } else {
                rw = WRITE_LOCK;
        }

        DLIST_REMOVE(f->pending_list, pending);

        /* we don't retry on a cancel */
        if (reason == PVFS_WAIT_CANCEL) {
                status = NT_STATUS_FILE_LOCK_CONFLICT;
        } else {
                /*
                 * here it's important to pass the pending pointer
                 * because with this we'll get the correct error code
                 * FILE_LOCK_CONFLICT in the error case
                 */
                status = brl_lock(pvfs->brl_context,
                                  f->brl_handle,
                                  locks[pending->pending_lock].pid,
                                  locks[pending->pending_lock].offset,
                                  locks[pending->pending_lock].count,
                                  rw, pending);
        }
        if (NT_STATUS_IS_OK(status)) {
                f->lock_count++;
                timed_out = false;
        }

        /* if we have failed and timed out, or succeeded, then we
           don't need the pending lock any more */
        if (NT_STATUS_IS_OK(status) || timed_out) {
                NTSTATUS status2;
                status2 = brl_remove_pending(pvfs->brl_context,
                                             f->brl_handle, pending);
                if (!NT_STATUS_IS_OK(status2)) {
                        DEBUG(0,("pvfs_lock: failed to remove pending lock - %s\n", nt_errstr(status2)));
                }
                talloc_free(pending->wait_handle);
        }

        if (!NT_STATUS_IS_OK(status)) {
                if (timed_out) {
                        /* no more chances */
                        pvfs_lock_async_failed(pvfs, req, f, locks, pending->pending_lock, status);
                        talloc_free(pending);
                } else {
                        /* we can try again */
                        DLIST_ADD(f->pending_list, pending);
                }
                return;
        }

        /* if we haven't timed out yet, then we can do more pending locks */
        if (rw == READ_LOCK) {
                rw = PENDING_READ_LOCK;
        } else {
                rw = PENDING_WRITE_LOCK;
        }

        /* we've now got the pending lock. try and get the rest, which might
           lead to more pending locks */
        for (i=pending->pending_lock+1;i<lck->lockx.in.lock_cnt;i++) {
                if (pending) {
                        pending->pending_lock = i;
                }

                status = brl_lock(pvfs->brl_context,
                                  f->brl_handle,
                                  locks[i].pid,
                                  locks[i].offset,
                                  locks[i].count,
                                  rw, pending);
                if (!NT_STATUS_IS_OK(status)) {
                        if (pending) {
                                /* a timed lock failed - set up a wait message to handle
                                   the pending lock notification or a timeout */
                                pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
                                                                         pending->end_time,
                                                                         pvfs_pending_lock_continue,
                                                                         pending);
                                if (pending->wait_handle == NULL) {
                                        pvfs_lock_async_failed(pvfs, req, f, locks, i, NT_STATUS_NO_MEMORY);
                                        talloc_free(pending);
                                } else {
                                        talloc_steal(pending, pending->wait_handle);
                                        DLIST_ADD(f->pending_list, pending);
                                }
                                return;
                        }
                        pvfs_lock_async_failed(pvfs, req, f, locks, i, status);
                        talloc_free(pending);
                        return;
                }

                f->lock_count++;
        }

        /* we've managed to get all the locks. Tell the client */
        req->async_states->status = NT_STATUS_OK;
        req->async_states->send_fn(req);
        talloc_free(pending);
}


/*
  called when we close a file that might have locks
*/
void pvfs_lock_close(struct pvfs_state *pvfs, struct pvfs_file *f)
{
        struct pvfs_pending_lock *p, *next;

        if (f->lock_count || f->pending_list) {
                DEBUG(5,("pvfs_lock: removing %.0f locks on close\n",
                         (double)f->lock_count));
                brl_close(f->pvfs->brl_context, f->brl_handle);
                f->lock_count = 0;
        }

        /* reply to all the pending lock requests, telling them the
           lock failed */
        for (p=f->pending_list;p;p=next) {
                next = p->next;
                DLIST_REMOVE(f->pending_list, p);
                p->req->async_states->status = NT_STATUS_RANGE_NOT_LOCKED;
                p->req->async_states->send_fn(p->req);
        }
}


/*
  cancel a set of locks
*/
static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct ntvfs_request *req, union smb_lock *lck,
                                 struct pvfs_file *f)
{
        struct pvfs_pending_lock *p;

        for (p=f->pending_list;p;p=p->next) {
                /* check if the lock request matches exactly - you can only cancel with exact matches */
                if (p->lck->lockx.in.ulock_cnt == lck->lockx.in.ulock_cnt &&
                    p->lck->lockx.in.lock_cnt  == lck->lockx.in.lock_cnt &&
                    p->lck->lockx.in.file.ntvfs== lck->lockx.in.file.ntvfs &&
                    p->lck->lockx.in.mode      == (lck->lockx.in.mode & ~LOCKING_ANDX_CANCEL_LOCK)) {
                        int i;

                        for (i=0;i<lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt;i++) {
                                if (p->lck->lockx.in.locks[i].pid != lck->lockx.in.locks[i].pid ||
                                    p->lck->lockx.in.locks[i].offset != lck->lockx.in.locks[i].offset ||
                                    p->lck->lockx.in.locks[i].count != lck->lockx.in.locks[i].count) {
                                        break;
                                }
                        }
                        /* the inner loop only completes if every entry matched;
                           a partial match is not an exact match, so keep looking */
                        if (i < lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt) continue;

                        /* an exact match! we can cancel it, which is equivalent
                           to triggering the timeout early */
                        pvfs_pending_lock_continue(p, PVFS_WAIT_TIMEOUT);
                        return NT_STATUS_OK;
                }
        }

        return NT_STATUS_DOS(ERRDOS, ERRcancelviolation);
}


/*
  lock or unlock a byte range
*/
NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
                   struct ntvfs_request *req, union smb_lock *lck)
{
        struct pvfs_state *pvfs = talloc_get_type(ntvfs->private_data,
                                  struct pvfs_state);
        struct pvfs_file *f;
        struct smb_lock_entry *locks;
        int i;
        enum brl_type rw;
        struct pvfs_pending_lock *pending = NULL;
        NTSTATUS status;

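        /* only the generic lock level is handled directly here; requests at
           other lock levels are mapped onto the generic level first */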
        if (lck->generic.level != RAW_LOCK_GENERIC) {
                return ntvfs_map_lock(ntvfs, req, lck);
        }

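        /* a LockingAndX request with the oplock release flag set is an
           oplock break acknowledgement from the client, not a byte
           range lock */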
        if (lck->lockx.in.mode & LOCKING_ANDX_OPLOCK_RELEASE) {
                return pvfs_oplock_release(ntvfs, req, lck);
        }

        f = pvfs_find_fd(pvfs, req, lck->lockx.in.file.ntvfs);
        if (!f) {
                return NT_STATUS_INVALID_HANDLE;
        }

        if (f->handle->fd == -1) {
                return NT_STATUS_FILE_IS_A_DIRECTORY;
        }

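        /* taking a byte range lock breaks any level2 oplocks held on
           the file by other opens */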
        status = pvfs_break_level2_oplocks(f);
        NT_STATUS_NOT_OK_RETURN(status);

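        /* if the client specified a timeout and the request may go
           async, set up a pending lock so a conflicting lock can be
           retried later rather than failing immediately */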
        if (lck->lockx.in.timeout != 0 &&
            (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
                pending = talloc(f, struct pvfs_pending_lock);
                if (pending == NULL) {
                        return NT_STATUS_NO_MEMORY;
                }

                pending->pvfs = pvfs;
                pending->lck = lck;
                pending->f = f;
                pending->req = req;

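                /* the lockx timeout is in milliseconds; turn it into an
                   absolute end time (seconds + microseconds) */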
                pending->end_time =
                        timeval_current_ofs(lck->lockx.in.timeout/1000,
                                            1000*(lck->lockx.in.timeout%1000));
        }

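        /* when we are allowed to wait, ask for a PENDING_* lock type so
           the byte range lock layer treats us as a waiter that can be
           retried; otherwise take a plain read/write lock */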
        if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
                rw = pending? PENDING_READ_LOCK : READ_LOCK;
        } else {
                rw = pending? PENDING_WRITE_LOCK : WRITE_LOCK;
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_CANCEL_LOCK) {
                talloc_free(pending);
                return pvfs_lock_cancel(pvfs, req, lck, f);
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_CHANGE_LOCKTYPE) {
                /* this seems to not be supported by any windows server,
                   or used by any clients */
                talloc_free(pending);
                return NT_STATUS_DOS(ERRDOS, ERRnoatomiclocks);
        }

        /* the unlocks happen first */
        locks = lck->lockx.in.locks;

        for (i=0;i<lck->lockx.in.ulock_cnt;i++) {
                status = brl_unlock(pvfs->brl_context,
                                    f->brl_handle,
                                    locks[i].pid,
                                    locks[i].offset,
                                    locks[i].count);
                if (!NT_STATUS_IS_OK(status)) {
                        talloc_free(pending);
                        return status;
                }
                f->lock_count--;
        }

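        /* the lock entries follow the unlock entries in the same array;
           i equals ulock_cnt here, so this skips to the first lock entry */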
        locks += i;

        for (i=0;i<lck->lockx.in.lock_cnt;i++) {
                if (pending) {
                        pending->pending_lock = i;
                }

                status = brl_lock(pvfs->brl_context,
                                  f->brl_handle,
                                  locks[i].pid,
                                  locks[i].offset,
                                  locks[i].count,
                                  rw, pending);
                if (!NT_STATUS_IS_OK(status)) {
                        if (pending) {
                                /* a timed lock failed - set up a wait message to handle
                                   the pending lock notification or a timeout */
                                pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
                                                                         pending->end_time,
                                                                         pvfs_pending_lock_continue,
                                                                         pending);
                                if (pending->wait_handle == NULL) {
                                        talloc_free(pending);
                                        return NT_STATUS_NO_MEMORY;
                                }
                                talloc_steal(pending, pending->wait_handle);
                                DLIST_ADD(f->pending_list, pending);
                                return NT_STATUS_OK;
                        }

                        /* undo the locks we just did */
                        for (i--;i>=0;i--) {
                                brl_unlock(pvfs->brl_context,
                                           f->brl_handle,
                                           locks[i].pid,
                                           locks[i].offset,
                                           locks[i].count);
                                f->lock_count--;
                        }
                        talloc_free(pending);
                        return status;
                }
                f->lock_count++;
        }

        talloc_free(pending);
        return NT_STATUS_OK;
}
