root/source3/rpc_server/srv_pipe_hnd.c


DEFINITIONS

This source file includes the following definitions.
  1. get_first_internal_pipe
  2. get_next_internal_pipe
  3. pipe_init_outgoing_data
  4. make_internal_rpc_pipe_p
  5. set_incoming_fault
  6. fill_rpc_header
  7. unmarshall_rpc_header
  8. free_pipe_context
  9. process_request_pdu
  10. process_complete_pdu
  11. process_incoming_data
  12. write_to_internal_pipe
  13. read_from_internal_pipe
  14. close_internal_rpc_pipe_hnd
  15. fsp_is_np
  16. np_proxy_state_destructor
  17. make_external_rpc_pipe_p
  18. np_open
  19. np_write_send
  20. np_write_done
  21. np_write_recv
  22. rpc_frag_more_fn
  23. np_read_send
  24. np_read_trigger
  25. np_read_done
  26. np_read_recv
  27. rpc_pipe_open_internal

   1 /* 
   2  *  Unix SMB/CIFS implementation.
   3  *  RPC Pipe client / server routines
   4  *  Copyright (C) Andrew Tridgell              1992-1998,
   5  *  Largely re-written : 2005
   6  *  Copyright (C) Jeremy Allison                1998 - 2005
   7  *  
   8  *  This program is free software; you can redistribute it and/or modify
   9  *  it under the terms of the GNU General Public License as published by
  10  *  the Free Software Foundation; either version 3 of the License, or
  11  *  (at your option) any later version.
  12  *  
  13  *  This program is distributed in the hope that it will be useful,
  14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16  *  GNU General Public License for more details.
  17  *  
  18  *  You should have received a copy of the GNU General Public License
  19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  20  */
  21 
  22 #include "includes.h"
  23 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
  24 
  25 #undef DBGC_CLASS
  26 #define DBGC_CLASS DBGC_RPC_SRV
  27 
  28 static int pipes_open;
  29 
  30 static pipes_struct *InternalPipes;
  31 
  32 /* TODO
   33  * the following prototypes are declared here to avoid
   34  * moving code about too much, which would make a patch
   35  * more disruptive / less obvious.
  36  *
  37  * these functions, and associated functions that they
  38  * call, should be moved behind a .so module-loading
  39  * system _anyway_.  so that's the next step...
  40  */
  41 
  42 static int close_internal_rpc_pipe_hnd(struct pipes_struct *p);
  43 
  44 /****************************************************************************
  45  Internal Pipe iterator functions.
  46 ****************************************************************************/
  47 
  48 pipes_struct *get_first_internal_pipe(void)
  49 {
  50         return InternalPipes;
  51 }
  52 
  53 pipes_struct *get_next_internal_pipe(pipes_struct *p)
  54 {
  55         return p->next;
  56 }
  57 
  58 /****************************************************************************
  59  Initialise an outgoing packet.
  60 ****************************************************************************/
  61 
  62 static bool pipe_init_outgoing_data(pipes_struct *p)
  63 {
  64         output_data *o_data = &p->out_data;
  65 
  66         /* Reset the offset counters. */
  67         o_data->data_sent_length = 0;
  68         o_data->current_pdu_sent = 0;
  69 
  70         prs_mem_free(&o_data->frag);
  71 
  72         /* Free any memory in the current return data buffer. */
  73         prs_mem_free(&o_data->rdata);
  74 
  75         /*
  76          * Initialize the outgoing RPC data buffer.
  77          * we will use this as the raw data area for replying to rpc requests.
  78          */     
  79         if(!prs_init(&o_data->rdata, 128, p->mem_ctx, MARSHALL)) {
  80                 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
  81                 return False;
  82         }
  83 
  84         return True;
  85 }
  86 
  87 /****************************************************************************
   88  Make an internal named pipe structure
  89 ****************************************************************************/
  90 
  91 static struct pipes_struct *make_internal_rpc_pipe_p(TALLOC_CTX *mem_ctx,
  92                                                      const struct ndr_syntax_id *syntax,
  93                                                      const char *client_address,
  94                                                      struct auth_serversupplied_info *server_info)
  95 {
  96         pipes_struct *p;
  97 
  98         DEBUG(4,("Create pipe requested %s\n",
  99                  get_pipe_name_from_iface(syntax)));
 100 
 101         p = TALLOC_ZERO_P(mem_ctx, struct pipes_struct);
 102 
 103         if (!p) {
 104                 DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
 105                 return NULL;
 106         }
 107 
 108         if ((p->mem_ctx = talloc_init("pipe %s %p",
 109                                       get_pipe_name_from_iface(syntax),
 110                                       p)) == NULL) {
 111                 DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
 112                 TALLOC_FREE(p);
 113                 return NULL;
 114         }
 115 
 116         if (!init_pipe_handle_list(p, syntax)) {
 117                 DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
 118                 talloc_destroy(p->mem_ctx);
 119                 TALLOC_FREE(p);
 120                 return NULL;
 121         }
 122 
 123         /*
 124          * Initialize the incoming RPC data buffer with one PDU worth of memory.
 125          * We cheat here and say we're marshalling, as we intend to add incoming
 126          * data directly into the prs_struct and we want it to auto grow. We will
  127  * change the type to UNMARSHALLING before processing the stream.
 128          */
 129 
 130         if(!prs_init(&p->in_data.data, 128, p->mem_ctx, MARSHALL)) {
 131                 DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
 132                 talloc_destroy(p->mem_ctx);
 133                 close_policy_by_pipe(p);
 134                 TALLOC_FREE(p);
 135                 return NULL;
 136         }
 137 
 138         p->server_info = copy_serverinfo(p, server_info);
 139         if (p->server_info == NULL) {
 140                 DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
 141                 talloc_destroy(p->mem_ctx);
 142                 close_policy_by_pipe(p);
 143                 TALLOC_FREE(p);
 144                 return NULL;
 145         }
 146 
 147         DLIST_ADD(InternalPipes, p);
 148 
 149         memcpy(p->client_address, client_address, sizeof(p->client_address));
 150 
 151         p->endian = RPC_LITTLE_ENDIAN;
 152 
 153         /*
 154          * Initialize the outgoing RPC data buffer with no memory.
 155          */     
 156         prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);
 157 
 158         p->syntax = *syntax;
 159 
 160         DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
 161                  get_pipe_name_from_iface(syntax), pipes_open));
 162 
 163         talloc_set_destructor(p, close_internal_rpc_pipe_hnd);
 164 
 165         return p;
 166 }
 167 
 168 /****************************************************************************
 169  Sets the fault state on incoming packets.
 170 ****************************************************************************/
 171 
 172 static void set_incoming_fault(pipes_struct *p)
 173 {
 174         prs_mem_free(&p->in_data.data);
 175         p->in_data.pdu_needed_len = 0;
 176         p->in_data.pdu_received_len = 0;
 177         p->fault_state = True;
 178         DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
 179                    get_pipe_name_from_iface(&p->syntax)));
 180 }
 181 
 182 /****************************************************************************
 183  Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
 184 ****************************************************************************/
 185 
 186 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
 187 {
 188         size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
 189 
 190         DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
 191                         (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
 192                         (unsigned int)p->in_data.pdu_received_len ));
 193 
 194         if (p->in_data.current_in_pdu == NULL) {
 195                 p->in_data.current_in_pdu = talloc_array(p, uint8_t,
 196                                                          RPC_HEADER_LEN);
 197         }
 198         if (p->in_data.current_in_pdu == NULL) {
 199                 DEBUG(0, ("talloc failed\n"));
 200                 return -1;
 201         }
 202 
 203         memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
 204         p->in_data.pdu_received_len += len_needed_to_complete_hdr;
 205 
 206         return (ssize_t)len_needed_to_complete_hdr;
 207 }
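/*
 * Worked example (added illustration, not part of the original file): with
 * 6 header bytes already buffered (pdu_received_len == 6) and 100 bytes
 * arriving, len_needed_to_complete_hdr = MIN(100, RPC_HEADER_LEN - 6)
 * = MIN(100, 10) = 10. Only those 10 bytes are consumed here; the remaining
 * 90 bytes are handled on the next pass of the caller's loop in
 * write_to_internal_pipe().
 */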
 208 
 209 /****************************************************************************
 210  Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
 211 ****************************************************************************/
 212 
 213 static ssize_t unmarshall_rpc_header(pipes_struct *p)
 214 {
 215         /*
 216          * Unmarshall the header to determine the needed length.
 217          */
 218 
 219         prs_struct rpc_in;
 220 
 221         if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
 222                 DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
 223                 set_incoming_fault(p);
 224                 return -1;
 225         }
 226 
 227         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
 228         prs_set_endian_data( &rpc_in, p->endian);
 229 
 230         prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
 231                                         p->in_data.pdu_received_len, False);
 232 
 233         /*
 234          * Unmarshall the header as this will tell us how much
 235          * data we need to read to get the complete pdu.
 236          * This also sets the endian flag in rpc_in.
 237          */
 238 
 239         if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
 240                 DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
 241                 set_incoming_fault(p);
 242                 prs_mem_free(&rpc_in);
 243                 return -1;
 244         }
 245 
 246         /*
 247          * Validate the RPC header.
 248          */
 249 
  250         if(p->hdr.major != 5 || p->hdr.minor != 0) {
 251                 DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
 252                 set_incoming_fault(p);
 253                 prs_mem_free(&rpc_in);
 254                 return -1;
 255         }
 256 
 257         /*
  258          * If there's no data in the incoming buffer, this should be the start of a new RPC.
 259          */
 260 
 261         if(prs_offset(&p->in_data.data) == 0) {
 262 
 263                 /*
  264                  * AS/U doesn't seem to set the FIRST flag in a BIND packet.
 265                  */
 266 
 267                 if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
 268                         /*
 269                          * Ensure that the FIRST flag is set. If not then we have
  270                          * a stream mismatch.
 271                          */
 272 
 273                         DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
 274                         set_incoming_fault(p);
 275                         prs_mem_free(&rpc_in);
 276                         return -1;
 277                 }
 278 
 279                 /*
 280                  * If this is the first PDU then set the endianness
 281                  * flag in the pipe. We will need this when parsing all
 282                  * data in this RPC.
 283                  */
 284 
 285                 p->endian = rpc_in.bigendian_data;
 286 
 287                 DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
 288                                 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
 289 
 290         } else {
 291 
 292                 /*
 293                  * If this is *NOT* the first PDU then check the endianness
 294                  * flag in the pipe is the same as that in the PDU.
 295                  */
 296 
 297                 if (p->endian != rpc_in.bigendian_data) {
 298                         DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
 299                         set_incoming_fault(p);
 300                         prs_mem_free(&rpc_in);
 301                         return -1;
 302                 }
 303         }
 304 
 305         /*
 306          * Ensure that the pdu length is sane.
 307          */
 308 
 309         if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
 310                 DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
 311                 set_incoming_fault(p);
 312                 prs_mem_free(&rpc_in);
 313                 return -1;
 314         }
 315 
 316         DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
 317                         (unsigned int)p->hdr.flags ));
 318 
 319         p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;
 320 
 321         prs_mem_free(&rpc_in);
 322 
 323         p->in_data.current_in_pdu = TALLOC_REALLOC_ARRAY(
 324                 p, p->in_data.current_in_pdu, uint8_t, p->hdr.frag_len);
 325         if (p->in_data.current_in_pdu == NULL) {
 326                 DEBUG(0, ("talloc failed\n"));
 327                 set_incoming_fault(p);
 328                 return -1;
 329         }
 330 
 331         return 0; /* No extra data processed. */
 332 }
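/*
 * Illustrative sketch (added, not part of the original file): the 16-byte
 * DCE/RPC connection-oriented common header that unmarshall_rpc_header()
 * parses via smb_io_rpc_hdr() into p->hdr. The struct and field names below
 * are assumptions for illustration only; Samba's real type is RPC_HDR.
 */
#if 0
struct example_dcerpc_common_hdr {      /* hypothetical, for illustration */
        uint8_t  rpc_vers;              /* always 5 */
        uint8_t  rpc_vers_minor;        /* always 0 */
        uint8_t  ptype;                 /* RPC_REQUEST, RPC_BIND, ... */
        uint8_t  pfc_flags;             /* RPC_FLG_FIRST, RPC_FLG_LAST, ... */
        uint8_t  drep[4];               /* data representation (endianness) */
        uint16_t frag_length;           /* total fragment length, incl. header */
        uint16_t auth_length;           /* length of the auth trailer */
        uint32_t call_id;               /* matches request and response */
};
/* pdu_needed_len above is simply frag_length - RPC_HEADER_LEN (16). */
#endif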
 333 
 334 /****************************************************************************
 335  Call this to free any talloc'ed memory. Do this before and after processing
 336  a complete PDU.
 337 ****************************************************************************/
 338 
 339 static void free_pipe_context(pipes_struct *p)
 340 {
 341         if (p->mem_ctx) {
 342                 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
 343                          "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
 344                 talloc_free_children(p->mem_ctx);
 345         } else {
 346                 p->mem_ctx = talloc_init(
 347                         "pipe %s %p", get_pipe_name_from_iface(&p->syntax), p);
 348                 if (p->mem_ctx == NULL) {
 349                         p->fault_state = True;
 350                 }
 351         }
 352 }
 353 
 354 /****************************************************************************
 355  Processes a request pdu. This will do auth processing if needed, and
 356  appends the data into the complete stream if the LAST flag is not set.
 357 ****************************************************************************/
 358 
 359 static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
 360 {
 361         uint32 ss_padding_len = 0;
 362         size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
 363                                 (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;
 364 
 365         if(!p->pipe_bound) {
 366                 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
 367                 set_incoming_fault(p);
 368                 return False;
 369         }
 370 
 371         /*
 372          * Check if we need to do authentication processing.
 373          * This is only done on requests, not binds.
 374          */
 375 
 376         /*
 377          * Read the RPC request header.
 378          */
 379 
 380         if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
 381                 DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
 382                 set_incoming_fault(p);
 383                 return False;
 384         }
 385 
 386         switch(p->auth.auth_type) {
 387                 case PIPE_AUTH_TYPE_NONE:
 388                         break;
 389 
 390                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
 391                 case PIPE_AUTH_TYPE_NTLMSSP:
 392                 {
 393                         NTSTATUS status;
 394                         if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
 395                                 DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
 396                                 DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
 397                                 set_incoming_fault(p);
 398                                 return False;
 399                         }
 400                         break;
 401                 }
 402 
 403                 case PIPE_AUTH_TYPE_SCHANNEL:
 404                         if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
 405                                 DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
 406                                 set_incoming_fault(p);
 407                                 return False;
 408                         }
 409                         break;
 410 
 411                 default:
 412                         DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
 413                         set_incoming_fault(p);
 414                         return False;
 415         }
 416 
 417         /* Now we've done the sign/seal we can remove any padding data. */
 418         if (data_len > ss_padding_len) {
 419                 data_len -= ss_padding_len;
 420         }
 421 
 422         /*
  423          * Check the data length doesn't go over the 15Mb limit. The limit
  424          * was increased after observing a bug in the Windows NT 4.0 SP6a
  425          * spoolsv.exe where the response to a GETPRINTERDRIVER2 RPC
  426          * would not fit in the initial buffer of size 0x1068.   --jerry 22/01/2002
 427          */
 428         
 429         if(prs_offset(&p->in_data.data) + data_len > MAX_RPC_DATA_SIZE) {
 430                 DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
 431                                 (unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
 432                 set_incoming_fault(p);
 433                 return False;
 434         }
 435 
 436         /*
 437          * Append the data portion into the buffer and return.
 438          */
 439 
 440         if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
 441                 DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
 442                                 (unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
 443                 set_incoming_fault(p);
 444                 return False;
 445         }
 446 
 447         if(p->hdr.flags & RPC_FLG_LAST) {
 448                 bool ret = False;
 449                 /*
 450                  * Ok - we finally have a complete RPC stream.
 451                  * Call the rpc command to process it.
 452                  */
 453 
 454                 /*
 455                  * Ensure the internal prs buffer size is *exactly* the same
 456                  * size as the current offset.
 457                  */
 458 
 459                 if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
 460                         DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
 461                         set_incoming_fault(p);
 462                         return False;
 463                 }
 464 
 465                 /*
 466                  * Set the parse offset to the start of the data and set the
 467                  * prs_struct to UNMARSHALL.
 468                  */
 469 
 470                 prs_set_offset(&p->in_data.data, 0);
 471                 prs_switch_type(&p->in_data.data, UNMARSHALL);
 472 
 473                 /*
 474                  * Process the complete data stream here.
 475                  */
 476 
 477                 free_pipe_context(p);
 478 
 479                 if(pipe_init_outgoing_data(p)) {
 480                         ret = api_pipe_request(p);
 481                 }
 482 
 483                 free_pipe_context(p);
 484 
 485                 /*
 486                  * We have consumed the whole data stream. Set back to
 487                  * marshalling and set the offset back to the start of
 488                  * the buffer to re-use it (we could also do a prs_mem_free()
 489                  * and then re_init on the next start of PDU. Not sure which
 490                  * is best here.... JRA.
 491                  */
 492 
 493                 prs_switch_type(&p->in_data.data, MARSHALL);
 494                 prs_set_offset(&p->in_data.data, 0);
 495                 return ret;
 496         }
 497 
 498         return True;
 499 }
 500 
 501 /****************************************************************************
 502  Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
 503  already been parsed and stored in p->hdr.
 504 ****************************************************************************/
 505 
 506 static void process_complete_pdu(pipes_struct *p)
 507 {
 508         prs_struct rpc_in;
 509         size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
 510         char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
 511         bool reply = False;
 512 
 513         if(p->fault_state) {
 514                 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
 515                          get_pipe_name_from_iface(&p->syntax)));
 516                 set_incoming_fault(p);
 517                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
 518                 return;
 519         }
 520 
 521         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
 522 
 523         /*
  524          * Ensure we're using the correct endianness for both the
 525          * RPC header flags and the raw data we will be reading from.
 526          */
 527 
 528         prs_set_endian_data( &rpc_in, p->endian);
 529         prs_set_endian_data( &p->in_data.data, p->endian);
 530 
 531         prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
 532 
 533         DEBUG(10,("process_complete_pdu: processing packet type %u\n",
 534                         (unsigned int)p->hdr.pkt_type ));
 535 
 536         switch (p->hdr.pkt_type) {
 537                 case RPC_REQUEST:
 538                         reply = process_request_pdu(p, &rpc_in);
 539                         break;
 540 
 541                 case RPC_PING: /* CL request - ignore... */
 542                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
 543                                 (unsigned int)p->hdr.pkt_type,
 544                                 get_pipe_name_from_iface(&p->syntax)));
 545                         break;
 546 
 547                 case RPC_RESPONSE: /* No responses here. */
 548                         DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
 549                                 get_pipe_name_from_iface(&p->syntax)));
 550                         break;
 551 
 552                 case RPC_FAULT:
  553                 case RPC_WORKING: /* CL request - reply to a ping when a call is in process. */
 554                 case RPC_NOCALL: /* CL - server reply to a ping call. */
 555                 case RPC_REJECT:
 556                 case RPC_ACK:
 557                 case RPC_CL_CANCEL:
 558                 case RPC_FACK:
 559                 case RPC_CANCEL_ACK:
 560                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
 561                                 (unsigned int)p->hdr.pkt_type,
 562                                 get_pipe_name_from_iface(&p->syntax)));
 563                         break;
 564 
 565                 case RPC_BIND:
 566                         /*
 567                          * We assume that a pipe bind is only in one pdu.
 568                          */
 569                         if(pipe_init_outgoing_data(p)) {
 570                                 reply = api_pipe_bind_req(p, &rpc_in);
 571                         }
 572                         break;
 573 
 574                 case RPC_BINDACK:
 575                 case RPC_BINDNACK:
 576                         DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
 577                                 (unsigned int)p->hdr.pkt_type,
 578                                 get_pipe_name_from_iface(&p->syntax)));
 579                         break;
 580 
 581 
 582                 case RPC_ALTCONT:
 583                         /*
 584                          * We assume that a pipe bind is only in one pdu.
 585                          */
 586                         if(pipe_init_outgoing_data(p)) {
 587                                 reply = api_pipe_alter_context(p, &rpc_in);
 588                         }
 589                         break;
 590 
 591                 case RPC_ALTCONTRESP:
 592                         DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
 593                                 get_pipe_name_from_iface(&p->syntax)));
 594                         break;
 595 
 596                 case RPC_AUTH3:
 597                         /*
 598                          * The third packet in an NTLMSSP auth exchange.
 599                          */
 600                         if(pipe_init_outgoing_data(p)) {
 601                                 reply = api_pipe_bind_auth3(p, &rpc_in);
 602                         }
 603                         break;
 604 
 605                 case RPC_SHUTDOWN:
 606                         DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
 607                                 get_pipe_name_from_iface(&p->syntax)));
 608                         break;
 609 
 610                 case RPC_CO_CANCEL:
 611                         /* For now just free all client data and continue processing. */
  612                         DEBUG(3,("process_complete_pdu: RPC_CO_CANCEL. Abandoning rpc call.\n"));
 613                         /* As we never do asynchronous RPC serving, we can never cancel a
 614                            call (as far as I know). If we ever did we'd have to send a cancel_ack
 615                            reply. For now, just free all client data and continue processing. */
 616                         reply = True;
 617                         break;
 618 #if 0
 619                         /* Enable this if we're doing async rpc. */
 620                         /* We must check the call-id matches the outstanding callid. */
 621                         if(pipe_init_outgoing_data(p)) {
 622                                 /* Send a cancel_ack PDU reply. */
 623                                 /* We should probably check the auth-verifier here. */
 624                                 reply = setup_cancel_ack_reply(p, &rpc_in);
 625                         }
 626                         break;
 627 #endif
 628 
 629                 case RPC_ORPHANED:
 630                         /* We should probably check the auth-verifier here.
 631                            For now just free all client data and continue processing. */
 632                         DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
 633                         reply = True;
 634                         break;
 635 
 636                 default:
 637                         DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
 638                         break;
 639         }
 640 
 641         /* Reset to little endian. Probably don't need this but it won't hurt. */
 642         prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
 643 
 644         if (!reply) {
 645                 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on "
 646                          "pipe %s\n", get_pipe_name_from_iface(&p->syntax)));
 647                 set_incoming_fault(p);
 648                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
 649                 prs_mem_free(&rpc_in);
 650         } else {
 651                 /*
 652                  * Reset the lengths. We're ready for a new pdu.
 653                  */
 654                 TALLOC_FREE(p->in_data.current_in_pdu);
 655                 p->in_data.pdu_needed_len = 0;
 656                 p->in_data.pdu_received_len = 0;
 657         }
 658 
 659         prs_mem_free(&rpc_in);
 660 }
 661 
 662 /****************************************************************************
 663  Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
 664 ****************************************************************************/
 665 
 666 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
 667 {
 668         size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);
 669 
 670         DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
 671                 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
 672                 (unsigned int)n ));
 673 
 674         if(data_to_copy == 0) {
 675                 /*
 676                  * This is an error - data is being received and there is no
 677                  * space in the PDU. Free the received data and go into the fault state.
 678                  */
 679                 DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
 680 incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
 681                 set_incoming_fault(p);
 682                 return -1;
 683         }
 684 
 685         /*
 686          * If we have no data already, wait until we get at least a RPC_HEADER_LEN
 687          * number of bytes before we can do anything.
 688          */
 689 
 690         if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
 691                 /*
 692                  * Always return here. If we have more data then the RPC_HEADER
 693                  * will be processed the next time around the loop.
 694                  */
 695                 return fill_rpc_header(p, data, data_to_copy);
 696         }
 697 
 698         /*
 699          * At this point we know we have at least an RPC_HEADER_LEN amount of data
 700          * stored in current_in_pdu.
 701          */
 702 
 703         /*
 704          * If pdu_needed_len is zero this is a new pdu. 
 705          * Unmarshall the header so we know how much more
 706          * data we need, then loop again.
 707          */
 708 
 709         if(p->in_data.pdu_needed_len == 0) {
 710                 ssize_t rret = unmarshall_rpc_header(p);
 711                 if (rret == -1 || p->in_data.pdu_needed_len > 0) {
 712                         return rret;
 713                 }
 714                 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
 715                    of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
 716                    pdu type. Deal with this in process_complete_pdu(). */
 717         }
 718 
 719         /*
 720          * Ok - at this point we have a valid RPC_HEADER in p->hdr.
 721          * Keep reading until we have a full pdu.
 722          */
 723 
 724         data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
 725 
 726         /*
 727          * Copy as much of the data as we need into the current_in_pdu buffer.
 728          * pdu_needed_len becomes zero when we have a complete pdu.
 729          */
 730 
 731         memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
 732         p->in_data.pdu_received_len += data_to_copy;
 733         p->in_data.pdu_needed_len -= data_to_copy;
 734 
 735         /*
 736          * Do we have a complete PDU ?
 737          * (return the number of bytes handled in the call)
 738          */
 739 
 740         if(p->in_data.pdu_needed_len == 0) {
 741                 process_complete_pdu(p);
 742                 return data_to_copy;
 743         }
 744 
 745         DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
 746                 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));
 747 
 748         return (ssize_t)data_to_copy;
 749 }
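/*
 * Worked example (added illustration, not part of the original file): a
 * request PDU with frag_len == 0x430 arriving in two SMB writes of 0x400
 * and 0x30 bytes. First write: one pass copies the 16 header bytes, the
 * next pass unmarshalls the header (consuming no data, pdu_needed_len =
 * 0x420), and a third pass copies the remaining 0x3f0 bytes, leaving
 * pdu_needed_len = 0x30. Second write: the final 0x30 bytes complete the
 * fragment and process_complete_pdu() is called.
 */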
 750 
 751 /****************************************************************************
 752  Accepts incoming data on an internal rpc pipe.
 753 ****************************************************************************/
 754 
 755 static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
 756 {
 757         size_t data_left = n;
 758 
 759         while(data_left) {
 760                 ssize_t data_used;
 761 
 762                 DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));
 763 
 764                 data_used = process_incoming_data(p, data, data_left);
 765 
 766                 DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));
 767 
 768                 if(data_used < 0) {
 769                         return -1;
 770                 }
 771 
 772                 data_left -= data_used;
 773                 data += data_used;
 774         }       
 775 
 776         return n;
 777 }
 778 
 779 /****************************************************************************
 780  Replies to a request to read data from a pipe.
 781 
 782  Headers are interspersed with the data at PDU intervals. By the time
 783  this function is called, the start of the data could possibly have been
 784  read by an SMBtrans (file_offset != 0).
 785 
 786  Calling create_rpc_reply() here is a hack. The data should already
 787  have been prepared into arrays of headers + data stream sections.
 788 ****************************************************************************/
 789 
 790 static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data, size_t n,
 791                                        bool *is_data_outstanding)
 792 {
 793         uint32 pdu_remaining = 0;
 794         ssize_t data_returned = 0;
 795 
 796         if (!p) {
 797                 DEBUG(0,("read_from_pipe: pipe not open\n"));
 798                 return -1;              
 799         }
 800 
 801         DEBUG(6,(" name: %s len: %u\n", get_pipe_name_from_iface(&p->syntax),
 802                  (unsigned int)n));
 803 
 804         /*
 805          * We cannot return more than one PDU length per
 806          * read request.
 807          */
 808 
 809         /*
 810          * This condition should result in the connection being closed.  
 811          * Netapp filers seem to set it to 0xffff which results in domain
 812          * authentications failing.  Just ignore it so things work.
 813          */
 814 
 815         if(n > RPC_MAX_PDU_FRAG_LEN) {
 816                 DEBUG(5,("read_from_pipe: too large read (%u) requested on "
 817                          "pipe %s. We can only service %d sized reads.\n",
 818                          (unsigned int)n, get_pipe_name_from_iface(&p->syntax),
 819                          RPC_MAX_PDU_FRAG_LEN ));
 820                 n = RPC_MAX_PDU_FRAG_LEN;
 821         }
 822 
 823         /*
 824          * Determine if there is still data to send in the
 825          * pipe PDU buffer. Always send this first. Never
 826          * send more than is left in the current PDU. The
 827          * client should send a new read request for a new
 828          * PDU.
 829          */
 830 
 831         pdu_remaining = prs_offset(&p->out_data.frag)
 832                 - p->out_data.current_pdu_sent;
 833 
 834         if (pdu_remaining > 0) {
 835                 data_returned = (ssize_t)MIN(n, pdu_remaining);
 836 
 837                 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
 838                           "current_pdu_sent = %u returning %d bytes.\n",
 839                           get_pipe_name_from_iface(&p->syntax),
 840                           (unsigned int)prs_offset(&p->out_data.frag),
 841                           (unsigned int)p->out_data.current_pdu_sent,
 842                           (int)data_returned));
 843 
 844                 memcpy(data,
 845                        prs_data_p(&p->out_data.frag)
 846                        + p->out_data.current_pdu_sent,
 847                        data_returned);
 848 
 849                 p->out_data.current_pdu_sent += (uint32)data_returned;
 850                 goto out;
 851         }
 852 
 853         /*
 854          * At this point p->current_pdu_len == p->current_pdu_sent (which
  855          * may of course be zero if this is the first return fragment).
 856          */
 857 
 858         DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
 859                   "= %u, prs_offset(&p->out_data.rdata) = %u.\n",
 860                   get_pipe_name_from_iface(&p->syntax), (int)p->fault_state,
 861                   (unsigned int)p->out_data.data_sent_length,
 862                   (unsigned int)prs_offset(&p->out_data.rdata) ));
 863 
 864         if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
 865                 /*
 866                  * We have sent all possible data, return 0.
 867                  */
 868                 data_returned = 0;
 869                 goto out;
 870         }
 871 
 872         /*
 873          * We need to create a new PDU from the data left in p->rdata.
 874          * Create the header/data/footers. This also sets up the fields
 875          * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
 876          * and stores the outgoing PDU in p->current_pdu.
 877          */
 878 
 879         if(!create_next_pdu(p)) {
 880                 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
 881                          get_pipe_name_from_iface(&p->syntax)));
 882                 return -1;
 883         }
 884 
 885         data_returned = MIN(n, prs_offset(&p->out_data.frag));
 886 
 887         memcpy( data, prs_data_p(&p->out_data.frag), (size_t)data_returned);
 888         p->out_data.current_pdu_sent += (uint32)data_returned;
 889 
 890   out:
 891         (*is_data_outstanding) = prs_offset(&p->out_data.frag) > n;
 892 
 893         if (p->out_data.current_pdu_sent == prs_offset(&p->out_data.frag)) {
 894                 /* We've returned everything in the out_data.frag
 895                  * so we're done with this pdu. Free it and reset
 896                  * current_pdu_sent. */
 897                 p->out_data.current_pdu_sent = 0;
 898                 prs_mem_free(&p->out_data.frag);
 899         }
 900         return data_returned;
 901 }
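/*
 * Illustrative sketch (added, not part of the original file): how a caller
 * might push one marshalled request into an internal pipe and then drain
 * the response fragment by fragment. The helper name and buffer size are
 * assumptions; the real consumers are np_write_send()/np_read_send() below.
 */
#if 0
static bool example_internal_pipe_roundtrip(struct pipes_struct *p,
                                            char *req, size_t req_len)
{
        char buf[RPC_MAX_PDU_FRAG_LEN];
        bool more = false;
        ssize_t got;

        if (write_to_internal_pipe(p, req, req_len) != (ssize_t)req_len) {
                return false;
        }

        do {
                /* Each call returns at most one PDU fragment; 'more' mirrors
                   what the SMB layer reports to the client as outstanding
                   data. */
                got = read_from_internal_pipe(p, buf, sizeof(buf), &more);
                if (got < 0) {
                        return false;
                }
                /* ... hand 'got' bytes of this fragment to the caller ... */
        } while (got > 0);

        return true;
}
#endif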
 902 
 903 /****************************************************************************
 904  Close an rpc pipe.
 905 ****************************************************************************/
 906 
 907 static int close_internal_rpc_pipe_hnd(struct pipes_struct *p)
 908 {
 909         if (!p) {
 910                 DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
 911                 return False;
 912         }
 913 
 914         prs_mem_free(&p->out_data.frag);
 915         prs_mem_free(&p->out_data.rdata);
 916         prs_mem_free(&p->in_data.data);
 917 
 918         if (p->auth.auth_data_free_func) {
 919                 (*p->auth.auth_data_free_func)(&p->auth);
 920         }
 921 
 922         TALLOC_FREE(p->mem_ctx);
 923 
 924         free_pipe_rpc_context( p->contexts );
 925 
 926         /* Free the handles database. */
 927         close_policy_by_pipe(p);
 928 
 929         DLIST_REMOVE(InternalPipes, p);
 930 
 931         ZERO_STRUCTP(p);
 932 
 933         TALLOC_FREE(p);
 934         
 935         return True;
 936 }
 937 
 938 bool fsp_is_np(struct files_struct *fsp)
 939 {
 940         enum FAKE_FILE_TYPE type;
 941 
 942         if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
 943                 return false;
 944         }
 945 
 946         type = fsp->fake_file_handle->type;
 947 
 948         return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
 949                 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
 950 }
 951 
 952 struct np_proxy_state {
 953         struct tevent_queue *read_queue;
 954         struct tevent_queue *write_queue;
 955         int fd;
 956 
 957         uint8_t *msg;
 958         size_t sent;
 959 };
 960 
 961 static int np_proxy_state_destructor(struct np_proxy_state *state)
 962 {
 963         if (state->fd != -1) {
 964                 close(state->fd);
 965         }
 966         return 0;
 967 }
 968 
 969 static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
 970                                                        const char *pipe_name,
 971                                                        struct auth_serversupplied_info *server_info)
 972 {
 973         struct np_proxy_state *result;
 974         struct sockaddr_un addr;
 975         char *socket_path;
 976         const char *socket_dir;
 977 
 978         DATA_BLOB req_blob;
 979         struct netr_SamInfo3 *info3;
 980         struct named_pipe_auth_req req;
 981         DATA_BLOB rep_blob;
 982         uint8 rep_buf[20];
 983         struct named_pipe_auth_rep rep;
 984         enum ndr_err_code ndr_err;
 985         NTSTATUS status;
 986         ssize_t written;
 987 
 988         result = talloc(mem_ctx, struct np_proxy_state);
 989         if (result == NULL) {
 990                 DEBUG(0, ("talloc failed\n"));
 991                 return NULL;
 992         }
 993 
 994         result->fd = socket(AF_UNIX, SOCK_STREAM, 0);
 995         if (result->fd == -1) {
 996                 DEBUG(10, ("socket(2) failed: %s\n", strerror(errno)));
 997                 goto fail;
 998         }
 999         talloc_set_destructor(result, np_proxy_state_destructor);
1000 
1001         ZERO_STRUCT(addr);
1002         addr.sun_family = AF_UNIX;
1003 
1004         socket_dir = lp_parm_const_string(
1005                 GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
1006                 get_dyn_NCALRPCDIR());
1007         if (socket_dir == NULL) {
 1008                 DEBUG(0, ("external_rpc_pipe:socket_dir not set\n"));
1009                 goto fail;
1010         }
1011 
1012         socket_path = talloc_asprintf(talloc_tos(), "%s/np/%s",
1013                                       socket_dir, pipe_name);
1014         if (socket_path == NULL) {
1015                 DEBUG(0, ("talloc_asprintf failed\n"));
1016                 goto fail;
1017         }
1018         strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
1019         TALLOC_FREE(socket_path);
1020 
1021         become_root();
1022         if (sys_connect(result->fd, (struct sockaddr *)&addr) == -1) {
1023                 unbecome_root();
1024                 DEBUG(0, ("connect(%s) failed: %s\n", addr.sun_path,
1025                           strerror(errno)));
1026                 goto fail;
1027         }
1028         unbecome_root();
1029 
1030         info3 = talloc(talloc_tos(), struct netr_SamInfo3);
1031         if (info3 == NULL) {
1032                 DEBUG(0, ("talloc failed\n"));
1033                 goto fail;
1034         }
1035 
1036         status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
1037         if (!NT_STATUS_IS_OK(status)) {
1038                 TALLOC_FREE(info3);
1039                 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
1040                           nt_errstr(status)));
1041                 goto fail;
1042         }
1043 
1044         req.level = 1;
1045         req.info.info1 = *info3;
1046 
1047         ndr_err = ndr_push_struct_blob(
1048                 &req_blob, talloc_tos(), NULL, &req,
1049                 (ndr_push_flags_fn_t)ndr_push_named_pipe_auth_req);
1050 
1051         if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1052                 DEBUG(10, ("ndr_push_named_pipe_auth_req failed: %s\n",
1053                            ndr_errstr(ndr_err)));
1054                 goto fail;
1055         }
1056 
1057         DEBUG(10, ("named_pipe_auth_req(client)[%u]\n", (uint32_t)req_blob.length));
1058         dump_data(10, req_blob.data, req_blob.length);
1059 
1060         written = write_data(result->fd, (char *)req_blob.data,
1061                              req_blob.length);
1062         if (written == -1) {
1063                 DEBUG(3, ("Could not write auth req data to RPC server\n"));
1064                 goto fail;
1065         }
1066 
1067         status = read_data(result->fd, (char *)rep_buf, sizeof(rep_buf));
1068         if (!NT_STATUS_IS_OK(status)) {
1069                 DEBUG(3, ("Could not read auth result\n"));
1070                 goto fail;
1071         }
1072 
1073         rep_blob = data_blob_const(rep_buf, sizeof(rep_buf));
1074 
 1075         DEBUG(10,("named_pipe_auth_rep(client)[%u]\n", (uint32_t)rep_blob.length));
1076         dump_data(10, rep_blob.data, rep_blob.length);
1077 
1078         ndr_err = ndr_pull_struct_blob(
1079                 &rep_blob, talloc_tos(), NULL, &rep,
1080                 (ndr_pull_flags_fn_t)ndr_pull_named_pipe_auth_rep);
1081 
1082         if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1083                 DEBUG(0, ("ndr_pull_named_pipe_auth_rep failed: %s\n",
1084                           ndr_errstr(ndr_err)));
1085                 goto fail;
1086         }
1087 
1088         if (rep.length != 16) {
1089                 DEBUG(0, ("req invalid length: %u != 16\n",
1090                           rep.length));
1091                 goto fail;
1092         }
1093 
1094         if (strcmp(NAMED_PIPE_AUTH_MAGIC, rep.magic) != 0) {
1095                 DEBUG(0, ("req invalid magic: %s != %s\n",
1096                           rep.magic, NAMED_PIPE_AUTH_MAGIC));
1097                 goto fail;
1098         }
1099 
1100         if (!NT_STATUS_IS_OK(rep.status)) {
1101                 DEBUG(0, ("req failed: %s\n",
1102                           nt_errstr(rep.status)));
1103                 goto fail;
1104         }
1105 
1106         if (rep.level != 1) {
1107                 DEBUG(0, ("req invalid level: %u != 1\n",
1108                           rep.level));
1109                 goto fail;
1110         }
1111 
1112         result->msg = NULL;
1113 
1114         result->read_queue = tevent_queue_create(result, "np_read");
1115         if (result->read_queue == NULL) {
1116                 goto fail;
1117         }
1118         result->write_queue = tevent_queue_create(result, "np_write");
1119         if (result->write_queue == NULL) {
1120                 goto fail;
1121         }
1122 
1123         return result;
1124 
1125  fail:
1126         TALLOC_FREE(result);
1127         return NULL;
1128 }
1129 
1130 NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
1131                  const char *client_address,
1132                  struct auth_serversupplied_info *server_info,
1133                  struct fake_file_handle **phandle)
1134 {
1135         const char **proxy_list;
1136         struct fake_file_handle *handle;
1137 
1138         proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);
1139 
1140         handle = talloc(mem_ctx, struct fake_file_handle);
1141         if (handle == NULL) {
1142                 return NT_STATUS_NO_MEMORY;
1143         }
1144 
1145         if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
1146                 struct np_proxy_state *p;
1147 
1148                 p = make_external_rpc_pipe_p(handle, name, server_info);
1149 
1150                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
1151                 handle->private_data = p;
1152         } else {
1153                 struct pipes_struct *p;
1154                 struct ndr_syntax_id syntax;
1155 
1156                 if (!is_known_pipename(name, &syntax)) {
1157                         TALLOC_FREE(handle);
1158                         return NT_STATUS_OBJECT_NAME_NOT_FOUND;
1159                 }
1160 
1161                 p = make_internal_rpc_pipe_p(handle, &syntax, client_address,
1162                                              server_info);
1163 
1164                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
1165                 handle->private_data = p;
1166         }
1167 
1168         if (handle->private_data == NULL) {
1169                 TALLOC_FREE(handle);
1170                 return NT_STATUS_PIPE_NOT_AVAILABLE;
1171         }
1172 
1173         *phandle = handle;
1174 
1175         return NT_STATUS_OK;
1176 }
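/*
 * Added note (not part of the original file): np_open() serves a pipe
 * internally unless its name appears in the parametric smb.conf option read
 * above via lp_parm_string_list(-1, "np", "proxy", NULL), i.e. something
 * like
 *
 *      np:proxy = epmapper lsarpc
 *
 * in which case make_external_rpc_pipe_p() forwards the pipe to an external
 * RPC server over a unix domain socket. A minimal caller sketch follows;
 * the helper name and the "srvsvc" pipe name are assumptions.
 */
#if 0
static NTSTATUS example_open_pipe(TALLOC_CTX *mem_ctx,
                                  const char *client_address,
                                  struct auth_serversupplied_info *server_info,
                                  struct fake_file_handle **phandle)
{
        return np_open(mem_ctx, "srvsvc", client_address, server_info,
                       phandle);
}
#endif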
1177 
1178 struct np_write_state {
1179         struct event_context *ev;
1180         struct np_proxy_state *p;
1181         struct iovec iov;
1182         ssize_t nwritten;
1183 };
1184 
1185 static void np_write_done(struct tevent_req *subreq);
1186 
1187 struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1188                                  struct fake_file_handle *handle,
1189                                  const uint8_t *data, size_t len)
1190 {
1191         struct tevent_req *req;
1192         struct np_write_state *state;
1193         NTSTATUS status;
1194 
1195         DEBUG(6, ("np_write_send: len: %d\n", (int)len));
1196         dump_data(50, data, len);
1197 
1198         req = tevent_req_create(mem_ctx, &state, struct np_write_state);
1199         if (req == NULL) {
1200                 return NULL;
1201         }
1202 
1203         if (len == 0) {
1204                 state->nwritten = 0;
1205                 status = NT_STATUS_OK;
1206                 goto post_status;
1207         }
1208 
1209         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1210                 struct pipes_struct *p = talloc_get_type_abort(
1211                         handle->private_data, struct pipes_struct);
1212 
1213                 state->nwritten = write_to_internal_pipe(p, (char *)data, len);
1214 
1215                 status = (state->nwritten >= 0)
1216                         ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1217                 goto post_status;
1218         }
1219 
1220         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1221                 struct np_proxy_state *p = talloc_get_type_abort(
1222                         handle->private_data, struct np_proxy_state);
1223                 struct tevent_req *subreq;
1224 
1225                 state->ev = ev;
1226                 state->p = p;
1227                 state->iov.iov_base = CONST_DISCARD(void *, data);
1228                 state->iov.iov_len = len;
1229 
1230                 subreq = writev_send(state, ev, p->write_queue, p->fd,
1231                                      &state->iov, 1);
1232                 if (subreq == NULL) {
1233                         goto fail;
1234                 }
1235                 tevent_req_set_callback(subreq, np_write_done, req);
1236                 return req;
1237         }
1238 
1239         status = NT_STATUS_INVALID_HANDLE;
1240  post_status:
1241         if (NT_STATUS_IS_OK(status)) {
1242                 tevent_req_done(req);
1243         } else {
1244                 tevent_req_nterror(req, status);
1245         }
1246         return tevent_req_post(req, ev);
1247  fail:
1248         TALLOC_FREE(req);
1249         return NULL;
1250 }
1251 
1252 static void np_write_done(struct tevent_req *subreq)
1253 {
1254         struct tevent_req *req = tevent_req_callback_data(
1255                 subreq, struct tevent_req);
1256         struct np_write_state *state = tevent_req_data(
1257                 req, struct np_write_state);
1258         ssize_t received;
1259         int err;
1260 
1261         received = writev_recv(subreq, &err);
1262         if (received < 0) {
1263                 tevent_req_nterror(req, map_nt_error_from_unix(err));
1264                 return;
1265         }
1266         state->nwritten = received;
1267         tevent_req_done(req);
1268 }
1269 
1270 NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
1271 {
1272         struct np_write_state *state = tevent_req_data(
1273                 req, struct np_write_state);
1274         NTSTATUS status;
1275 
1276         if (tevent_req_is_nterror(req, &status)) {
1277                 return status;
1278         }
1279         *pnwritten = state->nwritten;
1280         return NT_STATUS_OK;
1281 }
1282 
1283 static ssize_t rpc_frag_more_fn(uint8_t *buf, size_t buflen, void *priv)
1284 {
1285         prs_struct hdr_prs;
1286         struct rpc_hdr_info hdr;
1287         bool ret;
1288 
1289         if (buflen > RPC_HEADER_LEN) {
1290                 return 0;
1291         }
1292         prs_init_empty(&hdr_prs, talloc_tos(), UNMARSHALL);
1293         prs_give_memory(&hdr_prs, (char *)buf, RPC_HEADER_LEN, false);
1294         ret = smb_io_rpc_hdr("", &hdr, &hdr_prs, 0);
1295         prs_mem_free(&hdr_prs);
1296 
1297         if (!ret) {
1298                 return -1;
1299         }
1300 
1301         return (hdr.frag_len - RPC_HEADER_LEN);
1302 }
1303 
1304 struct np_read_state {
1305         struct event_context *ev;
1306         struct np_proxy_state *p;
1307         uint8_t *data;
1308         size_t len;
1309 
1310         size_t nread;
1311         bool is_data_outstanding;
1312 };
1313 
1314 static void np_read_trigger(struct tevent_req *req, void *private_data);
1315 static void np_read_done(struct tevent_req *subreq);
1316 
1317 struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1318                                 struct fake_file_handle *handle,
1319                                 uint8_t *data, size_t len)
1320 {
1321         struct tevent_req *req;
1322         struct np_read_state *state;
1323         NTSTATUS status;
1324 
1325         req = tevent_req_create(mem_ctx, &state, struct np_read_state);
1326         if (req == NULL) {
1327                 return NULL;
1328         }
1329 
1330         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1331                 struct pipes_struct *p = talloc_get_type_abort(
1332                         handle->private_data, struct pipes_struct);
1333 
1334                 state->nread = read_from_internal_pipe(
1335                         p, (char *)data, len, &state->is_data_outstanding);
1336 
1337                 status = (state->nread >= 0)
1338                         ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1339                 goto post_status;
1340         }
1341 
1342         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1343                 struct np_proxy_state *p = talloc_get_type_abort(
1344                         handle->private_data, struct np_proxy_state);
1345 
1346                 if (p->msg != NULL) {
1347                         size_t thistime;
1348 
1349                         thistime = MIN(talloc_get_size(p->msg) - p->sent,
1350                                        len);
1351 
1352                         memcpy(data, p->msg+p->sent, thistime);
1353                         state->nread = thistime;
1354                         p->sent += thistime;
1355 
1356                         if (p->sent < talloc_get_size(p->msg)) {
1357                                 state->is_data_outstanding = true;
1358                         } else {
1359                                 state->is_data_outstanding = false;
1360                                 TALLOC_FREE(p->msg);
1361                         }
1362                         status = NT_STATUS_OK;
1363                         goto post_status;
1364                 }
1365 
1366                 state->ev = ev;
1367                 state->p = p;
1368                 state->data = data;
1369                 state->len = len;
1370 
1371                 if (!tevent_queue_add(p->read_queue, ev, req, np_read_trigger,
1372                                       NULL)) {
1373                         goto fail;
1374                 }
1375                 return req;
1376         }
1377 
1378         status = NT_STATUS_INVALID_HANDLE;
1379  post_status:
1380         if (NT_STATUS_IS_OK(status)) {
1381                 tevent_req_done(req);
1382         } else {
1383                 tevent_req_nterror(req, status);
1384         }
1385         return tevent_req_post(req, ev);
1386  fail:
1387         TALLOC_FREE(req);
1388         return NULL;
1389 }
1390 
1391 static void np_read_trigger(struct tevent_req *req, void *private_data)
1392 {
1393         struct np_read_state *state = tevent_req_data(
1394                 req, struct np_read_state);
1395         struct tevent_req *subreq;
1396 
1397         subreq = read_packet_send(state, state->ev, state->p->fd,
1398                                   RPC_HEADER_LEN, rpc_frag_more_fn, NULL);
1399         if (tevent_req_nomem(subreq, req)) {
1400                 return;
1401         }
1402         tevent_req_set_callback(subreq, np_read_done, req);
1403 }
1404 
1405 static void np_read_done(struct tevent_req *subreq)
1406 {
1407         struct tevent_req *req = tevent_req_callback_data(
1408                 subreq, struct tevent_req);
1409         struct np_read_state *state = tevent_req_data(
1410                 req, struct np_read_state);
1411         ssize_t received;
1412         size_t thistime;
1413         int err;
1414 
1415         received = read_packet_recv(subreq, state->p, &state->p->msg, &err);
1416         TALLOC_FREE(subreq);
1417         if (received == -1) {
1418                 tevent_req_nterror(req, map_nt_error_from_unix(err));
1419                 return;
1420         }
1421 
1422         thistime = MIN(received, state->len);
1423 
1424         memcpy(state->data, state->p->msg, thistime);
1425         state->p->sent = thistime;
1426         state->nread = thistime;
1427 
1428         if (state->p->sent < received) {
1429                 state->is_data_outstanding = true;
1430         } else {
1431                 TALLOC_FREE(state->p->msg);
1432                 state->is_data_outstanding = false;
1433         }
1434 
1435         tevent_req_done(req);
1436         return;
1437 }
1438 
1439 NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
1440                       bool *is_data_outstanding)
1441 {
1442         struct np_read_state *state = tevent_req_data(
1443                 req, struct np_read_state);
1444         NTSTATUS status;
1445 
1446         if (tevent_req_is_nterror(req, &status)) {
1447                 return status;
1448         }
1449         *nread = state->nread;
1450         *is_data_outstanding = state->is_data_outstanding;
1451         return NT_STATUS_OK;
1452 }
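/*
 * Illustrative sketch (added, not part of the original file): driving the
 * np_write_send/np_write_recv and np_read_send/np_read_recv pairs
 * synchronously with tevent_req_poll(). The helper name is an assumption,
 * and the sketch assumes the usual Samba3 event_context/tevent aliasing;
 * smbd's SMB write/read/trans handlers are the real consumers.
 */
#if 0
static NTSTATUS example_np_transact(TALLOC_CTX *mem_ctx,
                                    struct event_context *ev,
                                    struct fake_file_handle *handle,
                                    const uint8_t *in, size_t in_len,
                                    uint8_t *out, size_t out_len,
                                    ssize_t *nread, bool *more)
{
        struct tevent_req *req;
        ssize_t nwritten;
        NTSTATUS status;

        req = np_write_send(mem_ctx, ev, handle, in, in_len);
        if (req == NULL) {
                return NT_STATUS_NO_MEMORY;
        }
        if (!tevent_req_poll(req, ev)) {        /* run the event loop */
                TALLOC_FREE(req);
                return map_nt_error_from_unix(errno);
        }
        status = np_write_recv(req, &nwritten);
        TALLOC_FREE(req);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        req = np_read_send(mem_ctx, ev, handle, out, out_len);
        if (req == NULL) {
                return NT_STATUS_NO_MEMORY;
        }
        if (!tevent_req_poll(req, ev)) {
                TALLOC_FREE(req);
                return map_nt_error_from_unix(errno);
        }
        status = np_read_recv(req, nread, more);
        TALLOC_FREE(req);
        return status;
}
#endif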
1453 
1454 /**
1455  * Create a new RPC client context which uses a local dispatch function.
1456  */
1457 NTSTATUS rpc_pipe_open_internal(TALLOC_CTX *mem_ctx,
1458                                 const struct ndr_syntax_id *abstract_syntax,
1459                                 NTSTATUS (*dispatch) (struct rpc_pipe_client *cli,
1460                                                       TALLOC_CTX *mem_ctx,
1461                                                       const struct ndr_interface_table *table,
1462                                                       uint32_t opnum, void *r),
1463                                 struct auth_serversupplied_info *serversupplied_info,
1464                                 struct rpc_pipe_client **presult)
1465 {
1466         struct rpc_pipe_client *result;
1467 
1468         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
1469         if (result == NULL) {
1470                 return NT_STATUS_NO_MEMORY;
1471         }
1472 
1473         result->abstract_syntax = *abstract_syntax;
1474         result->transfer_syntax = ndr_transfer_syntax;
1475         result->dispatch = dispatch;
1476 
1477         result->pipes_struct = make_internal_rpc_pipe_p(
1478                 result, abstract_syntax, "", serversupplied_info);
1479         if (result->pipes_struct == NULL) {
1480                 TALLOC_FREE(result);
1481                 return NT_STATUS_NO_MEMORY;
1482         }
1483 
1484         result->max_xmit_frag = -1;
1485         result->max_recv_frag = -1;
1486 
1487         *presult = result;
1488         return NT_STATUS_OK;
1489 }
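/*
 * Illustrative sketch (added, not part of the original file): creating an
 * internal RPC client whose calls are dispatched in-process. The dispatch
 * function name below is an assumption for illustration only; in-tree
 * callers pass the generated rpc_<pipe>_dispatch routine for the interface
 * they want.
 */
#if 0
static NTSTATUS example_open_internal_winreg(TALLOC_CTX *mem_ctx,
                                             struct auth_serversupplied_info *server_info,
                                             struct rpc_pipe_client **presult)
{
        return rpc_pipe_open_internal(mem_ctx,
                                      &ndr_table_winreg.syntax_id,
                                      rpc_winreg_dispatch, /* assumed name */
                                      server_info,
                                      presult);
}
#endif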
