DEFINITIONS
This source file includes the following definitions.
- cli_read_max_bufsize
- cli_write_max_bufsize
- cli_read_andx_send
- cli_read_andx_recv
- cli_readall_send
- cli_readall_done
- cli_readall_recv
- cli_pull_print
- cli_pull_send
- cli_pull_read_done
- cli_pull_recv
- cli_pull
- cli_read_sink
- cli_read
- cli_issue_write
- cli_write
- cli_smbwrite
- cli_write_andx_send
- cli_write_andx_recv
- cli_writeall_send
- cli_writeall_written
- cli_writeall_recv
- cli_push_write_setup
- cli_push_send
- cli_push_written
- cli_push_recv
- cli_push
1 /*
2 Unix SMB/CIFS implementation.
3 client file read/write routines
4 Copyright (C) Andrew Tridgell 1994-1998
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "includes.h"
21
22 /****************************************************************************
23 Calculate the recommended read buffer size
24 ****************************************************************************/
25 static size_t cli_read_max_bufsize(struct cli_state *cli)
26 {
27 if (!client_is_signing_on(cli) && !cli_encryption_on(cli)
28 && (cli->posix_capabilities & CIFS_UNIX_LARGE_READ_CAP)) {
29 return CLI_SAMBA_MAX_POSIX_LARGE_READX_SIZE;
30 }
31 if (cli->capabilities & CAP_LARGE_READX) {
32 return cli->is_samba
33 ? CLI_SAMBA_MAX_LARGE_READX_SIZE
34 : CLI_WINDOWS_MAX_LARGE_READX_SIZE;
35 }
36 return (cli->max_xmit - (smb_size+32)) & ~1023;
37 }
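/*
 * Worked example for the fallback above (an illustrative sketch, not part
 * of the original file): assuming smb_size is 39 in this tree and the
 * server negotiated, say, max_xmit = 4356, the usable read size is
 *   (4356 - (39 + 32)) & ~1023 == 4285 & ~1023 == 4096,
 * i.e. the value is rounded down to the largest multiple of 1024 that
 * still fits in a single SMB alongside the header.
 */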
38
39 /****************************************************************************
40 Calculate the recommended write buffer size
41 ****************************************************************************/
42 static size_t cli_write_max_bufsize(struct cli_state *cli, uint16_t write_mode)
43 {
44 if (write_mode == 0 &&
45 !client_is_signing_on(cli) &&
46 !cli_encryption_on(cli) &&
47 (cli->posix_capabilities & CIFS_UNIX_LARGE_WRITE_CAP) &&
48 (cli->capabilities & CAP_LARGE_FILES)) {
49 /* Only do massive writes if we can do them directly,
50 * with no signing or encrypting - not on a pipe. */
51 return CLI_SAMBA_MAX_POSIX_LARGE_WRITEX_SIZE;
52 }
53
54 if (cli->is_samba) {
55 return CLI_SAMBA_MAX_LARGE_WRITEX_SIZE;
56 }
57
58 if (((cli->capabilities & CAP_LARGE_WRITEX) == 0)
59 || client_is_signing_on(cli)
60 || strequal(cli->dev, "LPT1:")) {
61
62 /*
63 * Printer devices are restricted to the max_xmit write size in
64 * Vista and XP SP3, as are signing connections.
65 */
66
67 return (cli->max_xmit - (smb_size+32)) & ~1023;
68 }
69
70 return CLI_WINDOWS_MAX_LARGE_WRITEX_SIZE;
71 }
72
73
74 /*
75 * Send a read&x request
76 */
77
78 struct async_req *cli_read_andx_send(TALLOC_CTX *mem_ctx,
79 struct event_context *ev,
80 struct cli_state *cli, int fnum,
81 off_t offset, size_t size)
82 {
83 struct async_req *result;
84 struct cli_request *req;
85 bool bigoffset = False;
86
87 uint16_t vwv[12];
88 uint8_t wct = 10;
89
90 if (size > cli_read_max_bufsize(cli)) {
91 DEBUG(0, ("cli_read_andx_send got size=%d, can only handle "
92 "size=%d\n", (int)size,
93 (int)cli_read_max_bufsize(cli)));
94 return NULL;
95 }
96
97 SCVAL(vwv + 0, 0, 0xFF);
98 SCVAL(vwv + 0, 1, 0);
99 SSVAL(vwv + 1, 0, 0);
100 SSVAL(vwv + 2, 0, fnum);
101 SIVAL(vwv + 3, 0, offset);
102 SSVAL(vwv + 5, 0, size);
103 SSVAL(vwv + 6, 0, size);
104 SSVAL(vwv + 7, 0, (size >> 16));
105 SSVAL(vwv + 8, 0, 0);
106 SSVAL(vwv + 9, 0, 0);
107
108 if ((uint64_t)offset >> 32) {
109 bigoffset = True;
110 SIVAL(vwv + 10, 0,
111 (((uint64_t)offset)>>32) & 0xffffffff);
112 wct += 2;
113 }
114
115 result = cli_request_send(mem_ctx, ev, cli, SMBreadX, 0, wct, vwv, 0,
116 0, NULL);
117 if (result == NULL) {
118 return NULL;
119 }
120
121 req = talloc_get_type_abort(result->private_data, struct cli_request);
122
123 req->data.read.ofs = offset;
124 req->data.read.size = size;
125 req->data.read.received = 0;
126 req->data.read.rcvbuf = NULL;
127
128 return result;
129 }
130
131 /*
132 * Pull the data out of a finished async read_and_x request. rcvbuf is
133 * talloced from the request, so make sure that you copy it away before
134 * you talloc_free(req). "rcvbuf" is NOT a talloc_ctx of its own, so do not
135 * talloc_move it!
136 */
137
138 NTSTATUS cli_read_andx_recv(struct async_req *req, ssize_t *received,
139 uint8_t **rcvbuf)
140 {
141 struct cli_request *cli_req = talloc_get_type_abort(
142 req->private_data, struct cli_request);
143 uint8_t wct;
144 uint16_t *vwv;
145 uint16_t num_bytes;
146 uint8_t *bytes;
147 uint8_t *buf;
148 NTSTATUS status;
149 size_t size;
150
151 if (async_req_is_nterror(req, &status)) {
152 return status;
153 }
154
155 status = cli_pull_reply(req, &wct, &vwv, &num_bytes, &bytes);
156
157 if (NT_STATUS_IS_ERR(status)) {
158 return status;
159 }
160
161 if (wct < 12) {
162 return NT_STATUS_INVALID_NETWORK_RESPONSE;
163 }
164
165 /* size is the number of bytes the server returned.
166 * Might be zero. */
167 size = SVAL(vwv + 5, 0);
168 size |= (((unsigned int)SVAL(vwv + 7, 0)) << 16);
169
170 if (size > cli_req->data.read.size) {
171 DEBUG(5,("server returned more than we wanted!\n"));
172 return NT_STATUS_UNEXPECTED_IO_ERROR;
173 }
174
175 /*
176 * The bcc field must be valid for small reads; for large reads the
177 * 16-bit bcc field can't be correct.
178 */
179
180 if ((size < 0xffff) && (size > num_bytes)) {
181 DEBUG(5, ("server announced more bytes than sent\n"));
182 return NT_STATUS_INVALID_NETWORK_RESPONSE;
183 }
184
185 buf = (uint8_t *)smb_base(cli_req->inbuf) + SVAL(vwv+6, 0);
186
187 if (trans_oob(smb_len(cli_req->inbuf), SVAL(vwv+6, 0), size)
188 || (size && (buf < bytes))) {
189 DEBUG(5, ("server returned invalid read&x data offset\n"));
190 return NT_STATUS_INVALID_NETWORK_RESPONSE;
191 }
192
193 *rcvbuf = (uint8_t *)(smb_base(cli_req->inbuf) + SVAL(vwv + 6, 0));
194 *received = size;
195 return NT_STATUS_OK;
196 }
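/*
 * Illustrative sketch (not in the original file) of the rule stated above:
 * copy the data out of "rcvbuf" before the request goes away, because the
 * buffer lives inside the request's talloc hierarchy. "dest" and "wanted"
 * are hypothetical caller-owned variables.
 */
#if 0	/* example only */
	{
		ssize_t received;
		uint8_t *rcvbuf;
		NTSTATUS status;

		status = cli_read_andx_recv(req, &received, &rcvbuf);
		if (NT_STATUS_IS_OK(status) && (received <= wanted)) {
			memcpy(dest, rcvbuf, received);	/* copy first ... */
		}
		TALLOC_FREE(req);		/* ... then free the request */
	}
#endif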
197
198 struct cli_readall_state {
199 struct tevent_context *ev;
200 struct cli_state *cli;
201 uint16_t fnum;
202 off_t start_offset;
203 size_t size;
204 size_t received;
205 uint8_t *buf;
206 };
207
208 static void cli_readall_done(struct async_req *subreq);
209
210 static struct async_req *cli_readall_send(TALLOC_CTX *mem_ctx,
211 struct event_context *ev,
212 struct cli_state *cli,
213 uint16_t fnum,
214 off_t offset, size_t size)
215 {
216 struct async_req *req, *subreq;
217 struct cli_readall_state *state;
218
219 if (!async_req_setup(mem_ctx, &req, &state,
220 struct cli_readall_state)) {
221 return NULL;
222 }
223 state->ev = ev;
224 state->cli = cli;
225 state->fnum = fnum;
226 state->start_offset = offset;
227 state->size = size;
228 state->received = 0;
229 state->buf = NULL;
230
231 subreq = cli_read_andx_send(state, ev, cli, fnum, offset, size);
232 if (async_req_nomem(subreq, req)) {
233 TALLOC_FREE(req);
234 return NULL;
235 }
236 subreq->async.fn = cli_readall_done;
237 subreq->async.priv = req;
238 return req;
239 }
240
241 static void cli_readall_done(struct async_req *subreq)
242 {
243 struct async_req *req = talloc_get_type_abort(
244 subreq->async.priv, struct async_req);
245 struct cli_readall_state *state = talloc_get_type_abort(
246 req->private_data, struct cli_readall_state);
247 ssize_t received;
248 uint8_t *buf;
249 NTSTATUS status;
250
251 status = cli_read_andx_recv(subreq, &received, &buf);
252 if (!NT_STATUS_IS_OK(status)) {
253 async_req_nterror(req, status);
254 return;
255 }
256
257 if (received == 0) {
258 /* EOF */
259 async_req_done(req);
260 return;
261 }
262
263 if ((state->received == 0) && (received == state->size)) {
264 /* Ideal case: Got it all in one run */
265 state->buf = buf;
266 state->received += received;
267 async_req_done(req);
268 return;
269 }
270
271 /*
272 * We got a short read; issue a read for the rest. Unfortunately we
273 * have to allocate the buffer ourselves now, as our caller expects
274 * to receive a single buffer. cli_read_andx returns a pointer into
275 * the buffer received from the net, but with a short read we have
276 * to put the result together from several reads rather than handing
277 * out the receive buffer directly.
278 */
279
280 if (state->buf == NULL) {
281 state->buf = talloc_array(state, uint8_t, state->size);
282 if (async_req_nomem(state->buf, req)) {
283 return;
284 }
285 }
286 memcpy(state->buf + state->received, buf, received);
287 state->received += received;
288
289 TALLOC_FREE(subreq);
290
291 if (state->received >= state->size) {
292 async_req_done(req);
293 return;
294 }
295
296 subreq = cli_read_andx_send(state, state->ev, state->cli, state->fnum,
297 state->start_offset + state->received,
298 state->size - state->received);
299 if (async_req_nomem(subreq, req)) {
300 return;
301 }
302 subreq->async.fn = cli_readall_done;
303 subreq->async.priv = req;
304 }
305
306 static NTSTATUS cli_readall_recv(struct async_req *req, ssize_t *received,
307 uint8_t **rcvbuf)
308 {
309 struct cli_readall_state *state = talloc_get_type_abort(
310 req->private_data, struct cli_readall_state);
311 NTSTATUS status;
312
313 if (async_req_is_nterror(req, &status)) {
314 return status;
315 }
316 *received = state->received;
317 *rcvbuf = state->buf;
318 return NT_STATUS_OK;
319 }
320
321 struct cli_pull_subreq {
322 struct async_req *req;
323 ssize_t received;
324 uint8_t *buf;
325 };
326
327 /*
328 * Parallel read support.
329 *
330 * cli_pull sends as many read&x requests as the server would allow via
331 * max_mux at a time. When replies flow back in, the data is written into
332 * the callback function "sink" in the right order.
333 */
334
335 struct cli_pull_state {
336 struct async_req *req;
337
338 struct event_context *ev;
339 struct cli_state *cli;
340 uint16_t fnum;
341 off_t start_offset;
342 SMB_OFF_T size;
343
344 NTSTATUS (*sink)(char *buf, size_t n, void *priv);
345 void *priv;
346
347 size_t chunk_size;
348
349 /*
350 * Outstanding requests
351 */
352 int num_reqs;
353 struct cli_pull_subreq *reqs;
354
355 /*
356 * For how many bytes did we send requests already?
357 */
358 SMB_OFF_T requested;
359
360 /*
361 * Next request index to push into "sink". This walks around the "reqs"
362 * array, taking care that the requests are pushed to "sink" in the
363 * right order. If necessary (i.e. replies don't arrive in the right
364 * order), replies are held back in "reqs".
365 */
366 int top_req;
367
368 /*
369 * How many bytes did we push into "sink"?
370 */
371
372 SMB_OFF_T pushed;
373 };
374
375 static char *cli_pull_print(TALLOC_CTX *mem_ctx, struct async_req *req)
376 {
377 struct cli_pull_state *state = talloc_get_type_abort(
378 req->private_data, struct cli_pull_state);
379 char *result;
380
381 result = async_req_print(mem_ctx, req);
382 if (result == NULL) {
383 return NULL;
384 }
385
386 return talloc_asprintf_append_buffer(
387 result, "num_reqs=%d, top_req=%d",
388 state->num_reqs, state->top_req);
389 }
390
391 static void cli_pull_read_done(struct async_req *read_req);
392
393 /*
394 * Prepare an async pull request
395 */
396
397 struct async_req *cli_pull_send(TALLOC_CTX *mem_ctx,
398 struct event_context *ev,
399 struct cli_state *cli,
400 uint16_t fnum, off_t start_offset,
401 SMB_OFF_T size, size_t window_size,
402 NTSTATUS (*sink)(char *buf, size_t n,
403 void *priv),
404 void *priv)
405 {
406 struct async_req *result;
407 struct cli_pull_state *state;
408 int i;
409
410 if (!async_req_setup(mem_ctx, &result, &state,
411 struct cli_pull_state)) {
412 return NULL;
413 }
414 result->print = cli_pull_print;
415 state->req = result;
416
417 state->cli = cli;
418 state->ev = ev;
419 state->fnum = fnum;
420 state->start_offset = start_offset;
421 state->size = size;
422 state->sink = sink;
423 state->priv = priv;
424
425 state->pushed = 0;
426 state->top_req = 0;
427
428 if (size == 0) {
429 if (!async_post_ntstatus(result, ev, NT_STATUS_OK)) {
430 goto failed;
431 }
432 return result;
433 }
434
435 state->chunk_size = cli_read_max_bufsize(cli);
436
437 state->num_reqs = MAX(window_size/state->chunk_size, 1);
438 state->num_reqs = MIN(state->num_reqs, cli->max_mux);
439
440 state->reqs = TALLOC_ZERO_ARRAY(state, struct cli_pull_subreq,
441 state->num_reqs);
442 if (state->reqs == NULL) {
443 goto failed;
444 }
445
446 state->requested = 0;
447
448 for (i=0; i<state->num_reqs; i++) {
449 struct cli_pull_subreq *subreq = &state->reqs[i];
450 SMB_OFF_T size_left;
451 size_t request_thistime;
452
453 if (state->requested >= size) {
454 state->num_reqs = i;
455 break;
456 }
457
458 size_left = size - state->requested;
459 request_thistime = MIN(size_left, state->chunk_size);
460
461 subreq->req = cli_readall_send(
462 state->reqs, ev, cli, fnum,
463 state->start_offset + state->requested,
464 request_thistime);
465
466 if (subreq->req == NULL) {
467 goto failed;
468 }
469
470 subreq->req->async.fn = cli_pull_read_done;
471 subreq->req->async.priv = result;
472
473 state->requested += request_thistime;
474 }
475 return result;
476
477 failed:
478 TALLOC_FREE(result);
479 return NULL;
480 }
481
482 /*
483 * Handle incoming read replies, push the data into sink and send out new
484 * requests if necessary.
485 */
486
487 static void cli_pull_read_done(struct async_req *read_req)
488 {
489 struct async_req *pull_req = talloc_get_type_abort(
490 read_req->async.priv, struct async_req);
491 struct cli_pull_state *state = talloc_get_type_abort(
492 pull_req->private_data, struct cli_pull_state);
493 struct cli_pull_subreq *pull_subreq = NULL;
494 NTSTATUS status;
495 int i;
496
497 for (i = 0; i < state->num_reqs; i++) {
498 pull_subreq = &state->reqs[i];
499 if (read_req == pull_subreq->req) {
500 break;
501 }
502 }
503 if (i == state->num_reqs) {
504 /* Huh -- received something we did not send?? */
505 async_req_nterror(pull_req, NT_STATUS_INTERNAL_ERROR);
506 return;
507 }
508
509 status = cli_readall_recv(read_req, &pull_subreq->received,
510 &pull_subreq->buf);
511 if (!NT_STATUS_IS_OK(status)) {
512 async_req_nterror(state->req, status);
513 return;
514 }
515
516 /*
517 * This loop is the one that takes care of out-of-order replies. All
518 * pending requests are in state->reqs; state->reqs[top_req] is the
519 * one that is to be pushed next. If a request later than top_req is
520 * replied to, we can't push yet. Once top_req is replied to at a
521 * later point, we then push all the finished requests that follow
522 * it.
523 */
524
525 while (state->reqs[state->top_req].req != NULL) {
526 struct cli_pull_subreq *top_read;
527
528 DEBUG(11, ("cli_pull_read_done: top_req = %d\n",
529 state->top_req));
530
531 top_read = &state->reqs[state->top_req];
532
533 if (state->reqs[state->top_req].req->state < ASYNC_REQ_DONE) {
534 DEBUG(11, ("cli_pull_read_done: top request not yet "
535 "done\n"));
536 return;
537 }
538
539 DEBUG(10, ("cli_pull_read_done: Pushing %d bytes, %d already "
540 "pushed\n", (int)top_read->received,
541 (int)state->pushed));
542
543 status = state->sink((char *)top_read->buf,
544 top_read->received, state->priv);
545 if (!NT_STATUS_IS_OK(status)) {
546 async_req_nterror(state->req, status);
547 return;
548 }
549 state->pushed += top_read->received;
550
551 TALLOC_FREE(state->reqs[state->top_req].req);
552
553 if (state->requested < state->size) {
554 struct async_req *new_req;
555 SMB_OFF_T size_left;
556 size_t request_thistime;
557
558 size_left = state->size - state->requested;
559 request_thistime = MIN(size_left, state->chunk_size);
560
561 DEBUG(10, ("cli_pull_read_done: Requesting %d bytes "
562 "at %d, position %d\n",
563 (int)request_thistime,
564 (int)(state->start_offset
565 + state->requested),
566 state->top_req));
567
568 new_req = cli_readall_send(
569 state->reqs, state->ev, state->cli,
570 state->fnum,
571 state->start_offset + state->requested,
572 request_thistime);
573
574 if (async_req_nomem(new_req, state->req)) {
575 return;
576 }
577
578 new_req->async.fn = cli_pull_read_done;
579 new_req->async.priv = pull_req;
580
581 state->reqs[state->top_req].req = new_req;
582 state->requested += request_thistime;
583 }
584
585 state->top_req = (state->top_req+1) % state->num_reqs;
586 }
587
588 async_req_done(pull_req);
589 }
590
591 NTSTATUS cli_pull_recv(struct async_req *req, SMB_OFF_T *received)
592 {
593 struct cli_pull_state *state = talloc_get_type_abort(
594 req->private_data, struct cli_pull_state);
595 NTSTATUS status;
596
597 if (async_req_is_nterror(req, &status)) {
598 return status;
599 }
600 *received = state->pushed;
601 return NT_STATUS_OK;
602 }
603
604 NTSTATUS cli_pull(struct cli_state *cli, uint16_t fnum,
605 off_t start_offset, SMB_OFF_T size, size_t window_size,
606 NTSTATUS (*sink)(char *buf, size_t n, void *priv),
607 void *priv, SMB_OFF_T *received)
608 {
609 TALLOC_CTX *frame = talloc_stackframe();
610 struct event_context *ev;
611 struct async_req *req;
612 NTSTATUS result = NT_STATUS_NO_MEMORY;
613
614 if (cli->fd_event != NULL) {
615 /*
616 * Can't use sync call while an async call is in flight
617 */
618 return NT_STATUS_INVALID_PARAMETER;
619 }
620
621 ev = event_context_init(frame);
622 if (ev == NULL) {
623 goto nomem;
624 }
625
626 req = cli_pull_send(frame, ev, cli, fnum, start_offset, size,
627 window_size, sink, priv);
628 if (req == NULL) {
629 goto nomem;
630 }
631
632 while (req->state < ASYNC_REQ_DONE) {
633 event_loop_once(ev);
634 }
635
636 result = cli_pull_recv(req, received);
637 nomem:
638 TALLOC_FREE(frame);
639 return result;
640 }
641
642 static NTSTATUS cli_read_sink(char *buf, size_t n, void *priv)
643 {
644 char **pbuf = (char **)priv;
645 memcpy(*pbuf, buf, n);
646 *pbuf += n;
647 return NT_STATUS_OK;
648 }
649
650 ssize_t cli_read(struct cli_state *cli, int fnum, char *buf,
651 off_t offset, size_t size)
652 {
653 NTSTATUS status;
654 SMB_OFF_T ret;
655
656 status = cli_pull(cli, fnum, offset, size, size,
657 cli_read_sink, &buf, &ret);
658 if (!NT_STATUS_IS_OK(status)) {
659 cli_set_error(cli, status);
660 return -1;
661 }
662 return ret;
663 }
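/*
 * Illustrative sketch (not part of the original file): a cli_pull "sink"
 * that streams the pulled data into a local file descriptor instead of a
 * memory buffer. "priv" is assumed to carry an int* pointing at an open
 * fd; pull_to_fd() and the 64k window size are arbitrary example choices.
 */
#if 0	/* example only */
static NTSTATUS cli_pull_fd_sink(char *buf, size_t n, void *priv)
{
	int *pfd = (int *)priv;

	/* A short write or error aborts the whole pull. */
	if (write(*pfd, buf, n) != (ssize_t)n) {
		return NT_STATUS_UNEXPECTED_IO_ERROR;
	}
	return NT_STATUS_OK;
}

static NTSTATUS pull_to_fd(struct cli_state *cli, uint16_t fnum,
			   off_t start, SMB_OFF_T size, int fd,
			   SMB_OFF_T *received)
{
	return cli_pull(cli, fnum, start, size, 0x10000,
			cli_pull_fd_sink, &fd, received);
}
#endif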
664
665 /****************************************************************************
666 Issue a single SMBwrite and don't wait for a reply.
667 ****************************************************************************/
668
669 static bool cli_issue_write(struct cli_state *cli,
670 int fnum,
671 off_t offset,
672 uint16 mode,
673 const char *buf,
674 size_t size,
675 int i)
676 {
677 char *p;
678 bool large_writex = false;
679 /* We can only do direct writes if not signing and not encrypting. */
680 bool direct_writes = !client_is_signing_on(cli) && !cli_encryption_on(cli);
681
682 if (!direct_writes && size + 1 > cli->bufsize) {
683 cli->outbuf = (char *)SMB_REALLOC(cli->outbuf, size + 1024);
684 if (!cli->outbuf) {
685 return False;
686 }
687 cli->inbuf = (char *)SMB_REALLOC(cli->inbuf, size + 1024);
688 if (cli->inbuf == NULL) {
689 SAFE_FREE(cli->outbuf);
690 return False;
691 }
692 cli->bufsize = size + 1024;
693 }
694
695 memset(cli->outbuf,'\0',smb_size);
696 memset(cli->inbuf,'\0',smb_size);
697
698 if (cli->capabilities & CAP_LARGE_FILES) {
699 large_writex = True;
700 }
701
702 if (large_writex) {
703 cli_set_message(cli->outbuf,14,0,True);
704 } else {
705 cli_set_message(cli->outbuf,12,0,True);
706 }
707
708 SCVAL(cli->outbuf,smb_com,SMBwriteX);
709 SSVAL(cli->outbuf,smb_tid,cli->cnum);
710 cli_setup_packet(cli);
711
712 SCVAL(cli->outbuf,smb_vwv0,0xFF);
713 SSVAL(cli->outbuf,smb_vwv2,fnum);
714
715 SIVAL(cli->outbuf,smb_vwv3,offset);
716 SIVAL(cli->outbuf,smb_vwv5,0);
717 SSVAL(cli->outbuf,smb_vwv7,mode);
718
719 SSVAL(cli->outbuf,smb_vwv8,(mode & 0x0008) ? size : 0);
720 /*
721 * According to CIFS-TR-1p00, the following field should only
722 * be set if CAP_LARGE_WRITEX is set. We should check this
723 * locally. However, this check might already have been
724 * done by our callers.
725 */
726 SSVAL(cli->outbuf,smb_vwv9,(size>>16));
727 SSVAL(cli->outbuf,smb_vwv10,size);
728 /* +1 is pad byte. */
729 SSVAL(cli->outbuf,smb_vwv11,
730 smb_buf(cli->outbuf) - smb_base(cli->outbuf) + 1);
731
732 if (large_writex) {
733 SIVAL(cli->outbuf,smb_vwv12,(((uint64_t)offset)>>32) & 0xffffffff);
734 }
735
736 p = smb_base(cli->outbuf) + SVAL(cli->outbuf,smb_vwv11) -1;
737 *p++ = '\0'; /* pad byte. */
738 if (!direct_writes) {
739 memcpy(p, buf, size);
740 }
741 if (size > 0x1FFFF) {
742 /* This is a POSIX 14 word large write. */
743 set_message_bcc(cli->outbuf, 0); /* Set bcc to zero. */
744 _smb_setlen_large(cli->outbuf,smb_size + 28 + 1 /* pad */ + size - 4);
745 } else {
746 cli_setup_bcc(cli, p+size);
747 }
748
749 SSVAL(cli->outbuf,smb_mid,cli->mid + i);
750
751 show_msg(cli->outbuf);
752 if (direct_writes) {
753 /* For direct writes we now need to write the data
754 * directly out of buf. */
755 return cli_send_smb_direct_writeX(cli, buf, size);
756 } else {
757 return cli_send_smb(cli);
758 }
759 }
760
761 /****************************************************************************
762 Write to a file.
763 write_mode: 0x0001 disallow write caching
764 0x0002 return bytes remaining
765 0x0004 use raw named pipe protocol
766 0x0008 start of message mode named pipe protocol
767 ****************************************************************************/
768
769 ssize_t cli_write(struct cli_state *cli,
770 int fnum, uint16 write_mode,
771 const char *buf, off_t offset, size_t size)
772 {
773 ssize_t bwritten = 0;
774 unsigned int issued = 0;
775 unsigned int received = 0;
776 int mpx = 1;
777 size_t writesize;
778 int blocks;
779
780 if(cli->max_mux > 1) {
781 mpx = cli->max_mux-1;
782 } else {
783 mpx = 1;
784 }
785
786 writesize = cli_write_max_bufsize(cli, write_mode);
787
788 blocks = (size + (writesize-1)) / writesize;
789
790 while (received < blocks) {
791
792 while ((issued - received < mpx) && (issued < blocks)) {
793 ssize_t bsent = issued * writesize;
794 ssize_t size1 = MIN(writesize, size - bsent);
795
796 if (!cli_issue_write(cli, fnum, offset + bsent,
797 write_mode,
798 buf + bsent,
799 size1, issued))
800 return -1;
801 issued++;
802 }
803
804 if (!cli_receive_smb(cli)) {
805 return bwritten;
806 }
807
808 received++;
809
810 if (cli_is_error(cli))
811 break;
812
813 bwritten += SVAL(cli->inbuf, smb_vwv2);
814 if (writesize > 0xFFFF) {
815 bwritten += (((int)(SVAL(cli->inbuf, smb_vwv4)))<<16);
816 }
817 }
818
819 while (received < issued && cli_receive_smb(cli)) {
820 received++;
821 }
822
823 return bwritten;
824 }
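/*
 * Illustrative call (not in the original file) using the write_mode bits
 * documented above: a write with write caching disallowed. "fnum", "data"
 * and "len" are assumed to come from the caller.
 */
#if 0	/* example only */
	ssize_t nwritten = cli_write(cli, fnum, 0x0001 /* no write caching */,
				     data, 0 /* offset */, len);
	if (nwritten != (ssize_t)len) {
		/* short write or error; consult cli_is_error(cli) */
	}
#endif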
825
826 /****************************************************************************
827 Write to a file using an SMBwrite, not bypassing 0-byte writes.
828 ****************************************************************************/
829
830 ssize_t cli_smbwrite(struct cli_state *cli,
831 int fnum, char *buf, off_t offset, size_t size1)
832 {
833 char *p;
834 ssize_t total = 0;
835
836 do {
837 size_t size = MIN(size1, cli->max_xmit - 48);
838
839 memset(cli->outbuf,'\0',smb_size);
840 memset(cli->inbuf,'\0',smb_size);
841
842 cli_set_message(cli->outbuf,5, 0,True);
843
844 SCVAL(cli->outbuf,smb_com,SMBwrite);
845 SSVAL(cli->outbuf,smb_tid,cli->cnum);
846 cli_setup_packet(cli);
847
848 SSVAL(cli->outbuf,smb_vwv0,fnum);
849 SSVAL(cli->outbuf,smb_vwv1,size);
850 SIVAL(cli->outbuf,smb_vwv2,offset);
851 SSVAL(cli->outbuf,smb_vwv4,0);
852
853 p = smb_buf(cli->outbuf);
854 *p++ = 1;
855 SSVAL(p, 0, size); p += 2;
856 memcpy(p, buf + total, size); p += size;
857
858 cli_setup_bcc(cli, p);
859
860 if (!cli_send_smb(cli))
861 return -1;
862
863 if (!cli_receive_smb(cli))
864 return -1;
865
866 if (cli_is_error(cli))
867 return -1;
868
869 size = SVAL(cli->inbuf,smb_vwv0);
870 if (size == 0)
871 break;
872
873 size1 -= size;
874 total += size;
875 offset += size;
876
877 } while (size1);
878
879 return total;
880 }
881
882 /*
883 * Send a write&x request
884 */
885
886 struct async_req *cli_write_andx_send(TALLOC_CTX *mem_ctx,
887 struct event_context *ev,
888 struct cli_state *cli, uint16_t fnum,
889 uint16_t mode, const uint8_t *buf,
890 off_t offset, size_t size)
891 {
892 bool bigoffset = ((cli->capabilities & CAP_LARGE_FILES) != 0);
893 uint8_t wct = bigoffset ? 14 : 12;
894 size_t max_write = cli_write_max_bufsize(cli, mode);
895 uint16_t vwv[14];
896
897 size = MIN(size, max_write);
898
899 SCVAL(vwv+0, 0, 0xFF);
900 SCVAL(vwv+0, 1, 0);
901 SSVAL(vwv+1, 0, 0);
902 SSVAL(vwv+2, 0, fnum);
903 SIVAL(vwv+3, 0, offset);
904 SIVAL(vwv+5, 0, 0);
905 SSVAL(vwv+7, 0, mode);
906 SSVAL(vwv+8, 0, 0);
907 SSVAL(vwv+9, 0, (size>>16));
908 SSVAL(vwv+10, 0, size);
909
910 SSVAL(vwv+11, 0,
911 cli_wct_ofs(cli)
912 + 1 /* the wct field */
913 + wct * 2 /* vwv */
914 + 2 /* num_bytes field */
915 + 1 /* pad */);
916
917 if (bigoffset) {
918 SIVAL(vwv+12, 0, (((uint64_t)offset)>>32) & 0xffffffff);
919 }
920
921 return cli_request_send(mem_ctx, ev, cli, SMBwriteX, 0, wct, vwv,
922 2, size, buf);
923 }
924
925 NTSTATUS cli_write_andx_recv(struct async_req *req, size_t *pwritten)
926 {
927 uint8_t wct;
928 uint16_t *vwv;
929 uint16_t num_bytes;
930 uint8_t *bytes;
931 NTSTATUS status;
932 size_t written;
933
934 if (async_req_is_nterror(req, &status)) {
935 return status;
936 }
937
938 status = cli_pull_reply(req, &wct, &vwv, &num_bytes, &bytes);
939
940 if (NT_STATUS_IS_ERR(status)) {
941 return status;
942 }
943
944 if (wct < 6) {
945 return NT_STATUS_INVALID_NETWORK_RESPONSE;
946 }
947
948 written = SVAL(vwv+2, 0);
949 written |= SVAL(vwv+4, 0)<<16;
950 *pwritten = written;
951
952 return NT_STATUS_OK;
953 }
954
955 struct cli_writeall_state {
956 struct event_context *ev;
957 struct cli_state *cli;
958 uint16_t fnum;
959 uint16_t mode;
960 const uint8_t *buf;
961 off_t offset;
962 size_t size;
963 size_t written;
964 };
965
966 static void cli_writeall_written(struct async_req *req);
967
968 static struct async_req *cli_writeall_send(TALLOC_CTX *mem_ctx,
969 struct event_context *ev,
970 struct cli_state *cli,
971 uint16_t fnum,
972 uint16_t mode,
973 const uint8_t *buf,
974 off_t offset, size_t size)
975 {
976 struct async_req *result;
977 struct async_req *subreq;
978 struct cli_writeall_state *state;
979
980 if (!async_req_setup(mem_ctx, &result, &state,
981 struct cli_writeall_state)) {
982 return NULL;
983 }
984 state->ev = ev;
985 state->cli = cli;
986 state->fnum = fnum;
987 state->mode = mode;
988 state->buf = buf;
989 state->offset = offset;
990 state->size = size;
991 state->written = 0;
992
993 subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
994 state->mode, state->buf, state->offset,
995 state->size);
996 if (subreq == NULL) {
997 goto fail;
998 }
999
1000 subreq->async.fn = cli_writeall_written;
1001 subreq->async.priv = result;
1002 return result;
1003
1004 fail:
1005 TALLOC_FREE(result);
1006 return NULL;
1007 }
1008
1009 static void cli_writeall_written(struct async_req *subreq)
1010 {
1011 struct async_req *req = talloc_get_type_abort(
1012 subreq->async.priv, struct async_req);
1013 struct cli_writeall_state *state = talloc_get_type_abort(
1014 req->private_data, struct cli_writeall_state);
1015 NTSTATUS status;
1016 size_t written, to_write;
1017
1018 status = cli_write_andx_recv(subreq, &written);
1019 TALLOC_FREE(subreq);
1020 if (!NT_STATUS_IS_OK(status)) {
1021 async_req_nterror(req, status);
1022 return;
1023 }
1024
1025 state->written += written;
1026
1027 if (state->written > state->size) {
1028 async_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
1029 return;
1030 }
1031
1032 to_write = state->size - state->written;
1033
1034 if (to_write == 0) {
1035 async_req_done(req);
1036 return;
1037 }
1038
1039 subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
1040 state->mode,
1041 state->buf + state->written,
1042 state->offset + state->written, to_write);
1043 if (subreq == NULL) {
1044 async_req_nterror(req, NT_STATUS_NO_MEMORY);
1045 return;
1046 }
1047
1048 subreq->async.fn = cli_writeall_written;
1049 subreq->async.priv = req;
1050 }
1051
1052 static NTSTATUS cli_writeall_recv(struct async_req *req)
1053 {
1054 return async_req_simple_recv_ntstatus(req);
1055 }
1056
1057 struct cli_push_write_state {
1058 struct async_req *req; /* This is the main request! Not the subreq. */
1059 uint32_t idx;
1060 off_t ofs;
1061 uint8_t *buf;
1062 size_t size;
1063 };
1064
1065 struct cli_push_state {
1066 struct event_context *ev;
1067 struct cli_state *cli;
1068 uint16_t fnum;
1069 uint16_t mode;
1070 off_t start_offset;
1071 size_t window_size;
1072
1073 size_t (*source)(uint8_t *buf, size_t n, void *priv);
1074 void *priv;
1075
1076 bool eof;
1077
1078 size_t chunk_size;
1079 off_t next_offset;
1080
1081 /*
1082 * Outstanding requests
1083 */
1084 uint32_t pending;
1085 uint32_t num_reqs;
1086 struct cli_push_write_state **reqs;
1087 };
1088
1089 static void cli_push_written(struct async_req *req);
1090
1091 static bool cli_push_write_setup(struct async_req *req,
1092 struct cli_push_state *state,
1093 uint32_t idx)
1094 {
1095 struct cli_push_write_state *substate;
1096 struct async_req *subreq;
1097
1098 substate = talloc(state->reqs, struct cli_push_write_state);
1099 if (!substate) {
1100 return false;
1101 }
1102 substate->req = req;
1103 substate->idx = idx;
1104 substate->ofs = state->next_offset;
1105 substate->buf = talloc_array(substate, uint8_t, state->chunk_size);
1106 if (!substate->buf) {
1107 talloc_free(substate);
1108 return false;
1109 }
1110 substate->size = state->source(substate->buf,
1111 state->chunk_size,
1112 state->priv);
1113 if (substate->size == 0) {
1114 state->eof = true;
1115 /* nothing to send */
1116 talloc_free(substate);
1117 return true;
1118 }
1119
1120 subreq = cli_writeall_send(substate,
1121 state->ev, state->cli,
1122 state->fnum, state->mode,
1123 substate->buf,
1124 substate->ofs,
1125 substate->size);
1126 if (!subreq) {
1127 talloc_free(substate);
1128 return false;
1129 }
1130 subreq->async.fn = cli_push_written;
1131 subreq->async.priv = substate;
1132
1133 state->reqs[idx] = substate;
1134 state->pending += 1;
1135 state->next_offset += substate->size;
1136
1137 return true;
1138 }
1139
1140 struct async_req *cli_push_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1141 struct cli_state *cli,
1142 uint16_t fnum, uint16_t mode,
1143 off_t start_offset, size_t window_size,
1144 size_t (*source)(uint8_t *buf, size_t n,
1145 void *priv),
1146 void *priv)
1147 {
1148 struct async_req *req;
1149 struct cli_push_state *state;
1150 uint32_t i;
1151
1152 if (!async_req_setup(mem_ctx, &req, &state,
1153 struct cli_push_state)) {
1154 return NULL;
1155 }
1156 state->cli = cli;
1157 state->ev = ev;
1158 state->fnum = fnum;
1159 state->start_offset = start_offset;
1160 state->mode = mode;
1161 state->source = source;
1162 state->priv = priv;
1163 state->eof = false;
1164 state->pending = 0;
1165 state->next_offset = start_offset;
1166
1167 state->chunk_size = cli_write_max_bufsize(cli, mode);
1168
1169 if (window_size == 0) {
1170 window_size = cli->max_mux * state->chunk_size;
1171 }
1172 state->num_reqs = window_size/state->chunk_size;
1173 if ((window_size % state->chunk_size) > 0) {
1174 state->num_reqs += 1;
1175 }
1176 state->num_reqs = MIN(state->num_reqs, cli->max_mux);
1177 state->num_reqs = MAX(state->num_reqs, 1);
1178
1179 state->reqs = TALLOC_ZERO_ARRAY(state, struct cli_push_write_state *,
1180 state->num_reqs);
1181 if (state->reqs == NULL) {
1182 goto failed;
1183 }
1184
1185 for (i=0; i<state->num_reqs; i++) {
1186 if (!cli_push_write_setup(req, state, i)) {
1187 goto failed;
1188 }
1189
1190 if (state->eof) {
1191 break;
1192 }
1193 }
1194
1195 if (state->pending == 0) {
1196 if (!async_post_ntstatus(req, ev, NT_STATUS_OK)) {
1197 goto failed;
1198 }
1199 return req;
1200 }
1201
1202 return req;
1203
1204 failed:
1205 TALLOC_FREE(req);
1206 return NULL;
1207 }
1208
1209 static void cli_push_written(struct async_req *subreq)
/* [<][>][^][v][top][bottom][index][help] */
1210 {
1211 struct cli_push_write_state *substate = talloc_get_type_abort(
1212 subreq->async.priv, struct cli_push_write_state);
1213 struct async_req *req = substate->req;
1214 struct cli_push_state *state = talloc_get_type_abort(
1215 req->private_data, struct cli_push_state);
1216 NTSTATUS status;
1217 uint32_t idx = substate->idx;
1218
1219 state->reqs[idx] = NULL;
1220 state->pending -= 1;
1221
1222 status = cli_writeall_recv(subreq);
1223 TALLOC_FREE(subreq);
1224 TALLOC_FREE(substate);
1225 if (!NT_STATUS_IS_OK(status)) {
1226 async_req_nterror(req, status);
1227 return;
1228 }
1229
1230 if (!state->eof) {
1231 if (!cli_push_write_setup(req, state, idx)) {
1232 async_req_nomem(NULL, req);
1233 return;
1234 }
1235 }
1236
1237 if (state->pending == 0) {
1238 async_req_done(req);
1239 return;
1240 }
1241 }
1242
1243 NTSTATUS cli_push_recv(struct async_req *req)
1244 {
1245 return async_req_simple_recv_ntstatus(req);
1246 }
1247
1248 NTSTATUS cli_push(struct cli_state *cli, uint16_t fnum, uint16_t mode,
1249 off_t start_offset, size_t window_size,
1250 size_t (*source)(uint8_t *buf, size_t n, void *priv),
1251 void *priv)
1252 {
1253 TALLOC_CTX *frame = talloc_stackframe();
1254 struct event_context *ev;
1255 struct async_req *req;
1256 NTSTATUS result = NT_STATUS_NO_MEMORY;
1257
1258 if (cli->fd_event != NULL) {
1259 /*
1260 * Can't use sync call while an async call is in flight
1261 */
1262 return NT_STATUS_INVALID_PARAMETER;
1263 }
1264
1265 ev = event_context_init(frame);
1266 if (ev == NULL) {
1267 goto nomem;
1268 }
1269
1270 req = cli_push_send(frame, ev, cli, fnum, mode, start_offset,
1271 window_size, source, priv);
1272 if (req == NULL) {
1273 goto nomem;
1274 }
1275
1276 while (req->state < ASYNC_REQ_DONE) {
1277 event_loop_once(ev);
1278 }
1279
1280 result = cli_push_recv(req);
1281 nomem:
1282 TALLOC_FREE(frame);
1283 return result;
1284 }
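/*
 * Illustrative sketch (not part of the original file): a cli_push "source"
 * callback that feeds data from a local file descriptor. "priv" is assumed
 * to carry an int* pointing at an open fd; returning 0 signals EOF, which
 * makes cli_push stop issuing further writes.
 */
#if 0	/* example only */
static size_t push_from_fd_source(uint8_t *buf, size_t n, void *priv)
{
	int *pfd = (int *)priv;
	ssize_t nread = read(*pfd, buf, n);

	return (nread > 0) ? (size_t)nread : 0;
}

/*
 * A possible call site, passing window_size=0 so cli_push picks
 * max_mux * chunk_size itself:
 *
 *	status = cli_push(cli, fnum, 0, 0, 0, push_from_fd_source, &fd);
 */
#endif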