root/source3/rpc_client/cli_pipe.c


DEFINITIONS

This source file includes the following definitions.
  1. get_pipe_name_from_iface
  2. map_pipe_auth_type_to_rpc_auth_type
  3. rpccli_pipe_txt
  4. get_rpc_call_id
  5. rpc_grow_buffer
  6. rpc_read_send
  7. rpc_read_done
  8. rpc_read_recv
  9. rpc_write_send
  10. rpc_write_done
  11. rpc_write_recv
  12. parse_rpc_header
  13. get_complete_frag_send
  14. get_complete_frag_got_header
  15. get_complete_frag_got_rest
  16. get_complete_frag_recv
  17. cli_pipe_verify_ntlmssp
  18. cli_pipe_verify_schannel
  19. cli_pipe_validate_rpc_response
  20. cli_pipe_validate_current_pdu
  21. cli_pipe_reset_current_pdu
  22. cli_api_pipe_send
  23. cli_api_pipe_trans_done
  24. cli_api_pipe_write_done
  25. cli_api_pipe_read_done
  26. cli_api_pipe_recv
  27. rpc_api_pipe_state_destructor
  28. rpc_api_pipe_send
  29. rpc_api_pipe_trans_done
  30. rpc_api_pipe_got_pdu
  31. rpc_api_pipe_recv
  32. create_krb5_auth_bind_req
  33. create_spnego_ntlmssp_auth_rpc_bind_req
  34. create_ntlmssp_auth_rpc_bind_req
  35. create_schannel_auth_rpc_bind_req
  36. create_bind_or_alt_ctx_internal
  37. create_rpc_bind_req
  38. add_ntlmssp_auth_footer
  39. add_schannel_auth_footer
  40. calculate_data_len_tosend
  41. rpc_api_pipe_req_state_destructor
  42. rpc_api_pipe_req_send
  43. prepare_next_frag
  44. rpc_api_pipe_req_write_done
  45. rpc_api_pipe_req_done
  46. rpc_api_pipe_req_recv
  47. rpc_api_pipe_req
  48. rpc_pipe_set_hnd_state
  49. check_bind_response
  50. create_rpc_bind_auth3
  51. create_rpc_alter_context
  52. rpc_pipe_bind_state_destructor
  53. rpc_pipe_bind_send
  54. rpc_pipe_bind_step_one_done
  55. rpc_finish_auth3_bind_send
  56. rpc_bind_auth3_write_done
  57. rpc_finish_spnego_ntlmssp_bind_send
  58. rpc_bind_ntlmssp_api_done
  59. rpc_pipe_bind_recv
  60. rpc_pipe_bind
  61. rpccli_set_timeout
  62. rpccli_get_pwd_hash
  63. rpccli_anon_bind_data
  64. cli_auth_ntlmssp_data_destructor
  65. rpccli_ntlmssp_bind_data
  66. rpccli_schannel_bind_data
  67. cli_auth_kerberos_data_destructor
  68. rpccli_kerberos_bind_data
  69. rpc_pipe_open_tcp_port
  70. rpc_pipe_get_tcp_port
  71. rpc_pipe_open_tcp
  72. rpc_pipe_open_ncalrpc
  73. rpc_pipe_client_np_destructor
  74. rpc_pipe_open_np
  75. rpc_pipe_open_local
  76. cli_rpc_pipe_open
  77. cli_rpc_pipe_open_noauth_transport
  78. cli_rpc_pipe_open_noauth
  79. cli_rpc_pipe_open_ntlmssp_internal
  80. cli_rpc_pipe_open_ntlmssp
  81. cli_rpc_pipe_open_spnego_ntlmssp
  82. get_schannel_session_key_common
  83. get_schannel_session_key
  84. cli_rpc_pipe_open_schannel_with_key
  85. get_schannel_session_key_auth_ntlmssp
  86. cli_rpc_pipe_open_ntlmssp_auth_schannel
  87. cli_rpc_pipe_open_schannel
  88. cli_rpc_pipe_open_krb5
  89. cli_get_session_key

   1 /* 
   2  *  Unix SMB/CIFS implementation.
   3  *  RPC Pipe client / server routines
   4  *  Largely rewritten by Jeremy Allison             2005.
   5  *  
   6  *  This program is free software; you can redistribute it and/or modify
   7  *  it under the terms of the GNU General Public License as published by
   8  *  the Free Software Foundation; either version 3 of the License, or
   9  *  (at your option) any later version.
  10  *  
  11  *  This program is distributed in the hope that it will be useful,
  12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14  *  GNU General Public License for more details.
  15  *  
  16  *  You should have received a copy of the GNU General Public License
  17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  18  */
  19 
  20 #include "includes.h"
  21 #include "librpc/gen_ndr/cli_epmapper.h"
  22 
  23 #undef DBGC_CLASS
  24 #define DBGC_CLASS DBGC_RPC_CLI
  25 
  26 /*******************************************************************
  27 interface/version dce/rpc pipe identification
  28 ********************************************************************/
  29 
  30 #define PIPE_SRVSVC   "\\PIPE\\srvsvc"
  31 #define PIPE_SAMR     "\\PIPE\\samr"
  32 #define PIPE_WINREG   "\\PIPE\\winreg"
  33 #define PIPE_WKSSVC   "\\PIPE\\wkssvc"
  34 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
  35 #define PIPE_NTLSA    "\\PIPE\\ntlsa"
  36 #define PIPE_NTSVCS   "\\PIPE\\ntsvcs"
  37 #define PIPE_LSASS    "\\PIPE\\lsass"
  38 #define PIPE_LSARPC   "\\PIPE\\lsarpc"
  39 #define PIPE_SPOOLSS  "\\PIPE\\spoolss"
  40 #define PIPE_NETDFS   "\\PIPE\\netdfs"
  41 #define PIPE_ECHO     "\\PIPE\\rpcecho"
  42 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
  43 #define PIPE_EPM      "\\PIPE\\epmapper"
  44 #define PIPE_SVCCTL   "\\PIPE\\svcctl"
  45 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
  46 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
  47 #define PIPE_DRSUAPI  "\\PIPE\\drsuapi"
  48 
  49 /*
  50  * IMPORTANT!!  If you update this structure, make sure to
  51  * update the index #defines in smb.h.
  52  */
  53 
  54 static const struct pipe_id_info {
  55         /* the names appear not to matter: the syntaxes _do_ matter */
  56 
  57         const char *client_pipe;
  58         const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
  59 } pipe_names [] =
  60 {
  61         { PIPE_LSARPC,          &ndr_table_lsarpc.syntax_id },
  62         { PIPE_LSARPC,          &ndr_table_dssetup.syntax_id },
  63         { PIPE_SAMR,            &ndr_table_samr.syntax_id },
  64         { PIPE_NETLOGON,        &ndr_table_netlogon.syntax_id },
  65         { PIPE_SRVSVC,          &ndr_table_srvsvc.syntax_id },
  66         { PIPE_WKSSVC,          &ndr_table_wkssvc.syntax_id },
  67         { PIPE_WINREG,          &ndr_table_winreg.syntax_id },
  68         { PIPE_SPOOLSS,         &ndr_table_spoolss.syntax_id },
  69         { PIPE_NETDFS,          &ndr_table_netdfs.syntax_id },
  70         { PIPE_ECHO,            &ndr_table_rpcecho.syntax_id },
  71         { PIPE_SHUTDOWN,        &ndr_table_initshutdown.syntax_id },
  72         { PIPE_SVCCTL,          &ndr_table_svcctl.syntax_id },
  73         { PIPE_EVENTLOG,        &ndr_table_eventlog.syntax_id },
  74         { PIPE_NTSVCS,          &ndr_table_ntsvcs.syntax_id },
  75         { PIPE_EPMAPPER,        &ndr_table_epmapper.syntax_id },
  76         { PIPE_DRSUAPI,         &ndr_table_drsuapi.syntax_id },
  77         { NULL, NULL }
  78 };
  79 
  80 /****************************************************************************
  81  Return the pipe name from the interface.
  82  ****************************************************************************/
  83 
  84 const char *get_pipe_name_from_iface(const struct ndr_syntax_id *interface)
  85 {
  86         char *guid_str;
  87         const char *result;
  88         int i;
  89         for (i = 0; pipe_names[i].client_pipe; i++) {
  90                 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
  91                                         interface)) {
  92                         return &pipe_names[i].client_pipe[5];
  93                 }
  94         }
  95 
  96         /*
  97          * Here we should ask \\epmapper, but for now our code is only
  98          * interested in the known pipes mentioned in pipe_names[]
  99          */
 100 
 101         guid_str = GUID_string(talloc_tos(), &interface->uuid);
 102         if (guid_str == NULL) {
 103                 return NULL;
 104         }
 105         result = talloc_asprintf(talloc_tos(), "Interface %s.%d", guid_str,
 106                                  (int)interface->if_version);
 107         TALLOC_FREE(guid_str);
 108 
 109         if (result == NULL) {
 110                 return "PIPE";
 111         }
 112         return result;
 113 }
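
/*
 * A minimal usage sketch of the lookup above, kept disabled: resolve an
 * interface to a printable pipe name for a DEBUG message. The helper name
 * example_print_pipe_name is made up for illustration; the syntax_id it
 * passes in is one of the tables already referenced by pipe_names[].
 */
#if 0
static void example_print_pipe_name(void)
{
        const char *name = get_pipe_name_from_iface(
                &ndr_table_samr.syntax_id);

        if (name != NULL) {
                DEBUG(10, ("samr interface maps to pipe %s\n", name));
        }
}
#endif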
 114 
 115 /********************************************************************
 116  Map internal value to wire value.
 117  ********************************************************************/
 118 
 119 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
 120 {
 121         switch (auth_type) {
 122 
 123         case PIPE_AUTH_TYPE_NONE:
 124                 return RPC_ANONYMOUS_AUTH_TYPE;
 125 
 126         case PIPE_AUTH_TYPE_NTLMSSP:
 127                 return RPC_NTLMSSP_AUTH_TYPE;
 128 
 129         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
 130         case PIPE_AUTH_TYPE_SPNEGO_KRB5:
 131                 return RPC_SPNEGO_AUTH_TYPE;
 132 
 133         case PIPE_AUTH_TYPE_SCHANNEL:
 134                 return RPC_SCHANNEL_AUTH_TYPE;
 135 
 136         case PIPE_AUTH_TYPE_KRB5:
 137                 return RPC_KRB5_AUTH_TYPE;
 138 
 139         default:
  140                 DEBUG(0,("map_pipe_auth_type_to_rpc_auth_type: unknown pipe "
 141                         "auth type %u\n",
 142                         (unsigned int)auth_type ));
 143                 break;
 144         }
 145         return -1;
 146 }
 147 
 148 /********************************************************************
 149  Pipe description for a DEBUG
 150  ********************************************************************/
 151 static const char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx,
 152                                    struct rpc_pipe_client *cli)
 153 {
 154         char *result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
 155         if (result == NULL) {
 156                 return "pipe";
 157         }
 158         return result;
 159 }
 160 
 161 /********************************************************************
 162  Rpc pipe call id.
 163  ********************************************************************/
 164 
 165 static uint32 get_rpc_call_id(void)
 166 {
 167         static uint32 call_id = 0;
 168         return ++call_id;
 169 }
 170 
 171 /*
  172  * Realloc pdu to have at least "size" bytes
 173  */
 174 
 175 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
 176 {
 177         size_t extra_size;
 178 
 179         if (prs_data_size(pdu) >= size) {
 180                 return true;
 181         }
 182 
 183         extra_size = size - prs_data_size(pdu);
 184 
 185         if (!prs_force_grow(pdu, extra_size)) {
 186                 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
 187                           "%d bytes.\n", (int)extra_size));
 188                 return false;
 189         }
 190 
 191         DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
 192                   (int)extra_size, prs_data_size(pdu)));
 193         return true;
 194 }
 195 
 196 
 197 /*******************************************************************
  198  Use SMBreadX to get the rest of one fragment's worth of rpc data.
  199  Reads the whole size or gives an error message.
 200  ********************************************************************/
 201 
 202 struct rpc_read_state {
 203         struct event_context *ev;
 204         struct rpc_cli_transport *transport;
 205         uint8_t *data;
 206         size_t size;
 207         size_t num_read;
 208 };
 209 
 210 static void rpc_read_done(struct async_req *subreq);
 211 
 212 static struct async_req *rpc_read_send(TALLOC_CTX *mem_ctx,
 213                                        struct event_context *ev,
 214                                        struct rpc_cli_transport *transport,
 215                                        uint8_t *data, size_t size)
 216 {
 217         struct async_req *result, *subreq;
 218         struct rpc_read_state *state;
 219 
 220         if (!async_req_setup(mem_ctx, &result, &state,
 221                              struct rpc_read_state)) {
 222                 return NULL;
 223         }
 224         state->ev = ev;
 225         state->transport = transport;
 226         state->data = data;
 227         state->size = size;
 228         state->num_read = 0;
 229 
 230         DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
 231 
 232         subreq = transport->read_send(state, ev, (uint8_t *)data, size,
 233                                       transport->priv);
 234         if (subreq == NULL) {
 235                 goto fail;
 236         }
 237         subreq->async.fn = rpc_read_done;
 238         subreq->async.priv = result;
 239         return result;
 240 
 241  fail:
 242         TALLOC_FREE(result);
 243         return NULL;
 244 }
 245 
 246 static void rpc_read_done(struct async_req *subreq)
 247 {
 248         struct async_req *req = talloc_get_type_abort(
 249                 subreq->async.priv, struct async_req);
 250         struct rpc_read_state *state = talloc_get_type_abort(
 251                 req->private_data, struct rpc_read_state);
 252         NTSTATUS status;
 253         ssize_t received;
 254 
 255         status = state->transport->read_recv(subreq, &received);
 256         TALLOC_FREE(subreq);
 257         if (!NT_STATUS_IS_OK(status)) {
 258                 async_req_nterror(req, status);
 259                 return;
 260         }
 261 
 262         state->num_read += received;
 263         if (state->num_read == state->size) {
 264                 async_req_done(req);
 265                 return;
 266         }
 267 
 268         subreq = state->transport->read_send(state, state->ev,
 269                                              state->data + state->num_read,
 270                                              state->size - state->num_read,
 271                                              state->transport->priv);
 272         if (async_req_nomem(subreq, req)) {
 273                 return;
 274         }
 275         subreq->async.fn = rpc_read_done;
 276         subreq->async.priv = req;
 277 }
 278 
 279 static NTSTATUS rpc_read_recv(struct async_req *req)
 280 {
 281         return async_req_simple_recv_ntstatus(req);
 282 }
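
/*
 * Illustrative sketch, kept disabled, of how a caller drives the
 * rpc_read_send/rpc_read_recv pair using the async_req convention used
 * throughout this file (hook async.fn, collect the status in the
 * callback). The example_* names are made up for illustration.
 */
#if 0
static void example_read_done(struct async_req *subreq)
{
        NTSTATUS status = rpc_read_recv(subreq);
        TALLOC_FREE(subreq);
        DEBUG(10, ("example read returned %s\n", nt_errstr(status)));
}

static struct async_req *example_read(TALLOC_CTX *mem_ctx,
                                      struct event_context *ev,
                                      struct rpc_cli_transport *transport,
                                      uint8_t *buf, size_t len)
{
        struct async_req *subreq = rpc_read_send(mem_ctx, ev, transport,
                                                 buf, len);
        if (subreq != NULL) {
                subreq->async.fn = example_read_done;
        }
        return subreq;
}
#endif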
 283 
 284 struct rpc_write_state {
 285         struct event_context *ev;
 286         struct rpc_cli_transport *transport;
 287         const uint8_t *data;
 288         size_t size;
 289         size_t num_written;
 290 };
 291 
 292 static void rpc_write_done(struct async_req *subreq);
 293 
 294 static struct async_req *rpc_write_send(TALLOC_CTX *mem_ctx,
 295                                         struct event_context *ev,
 296                                         struct rpc_cli_transport *transport,
 297                                         const uint8_t *data, size_t size)
 298 {
 299         struct async_req *result, *subreq;
 300         struct rpc_write_state *state;
 301 
 302         if (!async_req_setup(mem_ctx, &result, &state,
 303                              struct rpc_write_state)) {
 304                 return NULL;
 305         }
 306         state->ev = ev;
 307         state->transport = transport;
 308         state->data = data;
 309         state->size = size;
 310         state->num_written = 0;
 311 
 312         DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
 313 
 314         subreq = transport->write_send(state, ev, data, size, transport->priv);
 315         if (subreq == NULL) {
 316                 goto fail;
 317         }
 318         subreq->async.fn = rpc_write_done;
 319         subreq->async.priv = result;
 320         return result;
 321  fail:
 322         TALLOC_FREE(result);
 323         return NULL;
 324 }
 325 
 326 static void rpc_write_done(struct async_req *subreq)
 327 {
 328         struct async_req *req = talloc_get_type_abort(
 329                 subreq->async.priv, struct async_req);
 330         struct rpc_write_state *state = talloc_get_type_abort(
 331                 req->private_data, struct rpc_write_state);
 332         NTSTATUS status;
 333         ssize_t written;
 334 
 335         status = state->transport->write_recv(subreq, &written);
 336         TALLOC_FREE(subreq);
 337         if (!NT_STATUS_IS_OK(status)) {
 338                 async_req_nterror(req, status);
 339                 return;
 340         }
 341 
 342         state->num_written += written;
 343 
 344         if (state->num_written == state->size) {
 345                 async_req_done(req);
 346                 return;
 347         }
 348 
 349         subreq = state->transport->write_send(state, state->ev,
 350                                               state->data + state->num_written,
 351                                               state->size - state->num_written,
 352                                               state->transport->priv);
 353         if (async_req_nomem(subreq, req)) {
 354                 return;
 355         }
 356         subreq->async.fn = rpc_write_done;
 357         subreq->async.priv = req;
 358 }
 359 
 360 static NTSTATUS rpc_write_recv(struct async_req *req)
 361 {
 362         return async_req_simple_recv_ntstatus(req);
 363 }
 364 
 365 
 366 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
 367                                  struct rpc_hdr_info *prhdr,
 368                                  prs_struct *pdu)
 369 {
 370         /*
 371          * This next call sets the endian bit correctly in current_pdu. We
 372          * will propagate this to rbuf later.
 373          */
 374 
 375         if(!smb_io_rpc_hdr("rpc_hdr   ", prhdr, pdu, 0)) {
  376                 DEBUG(0, ("parse_rpc_header: Failed to unmarshall RPC_HDR.\n"));
 377                 return NT_STATUS_BUFFER_TOO_SMALL;
 378         }
 379 
 380         if (prhdr->frag_len > cli->max_recv_frag) {
  381                 DEBUG(0, ("parse_rpc_header: Server sent fraglen %d,"
 382                           " we only allow %d\n", (int)prhdr->frag_len,
 383                           (int)cli->max_recv_frag));
 384                 return NT_STATUS_BUFFER_TOO_SMALL;
 385         }
 386 
 387         return NT_STATUS_OK;
 388 }
 389 
 390 /****************************************************************************
 391  Try and get a PDU's worth of data from current_pdu. If not, then read more
 392  from the wire.
 393  ****************************************************************************/
 394 
 395 struct get_complete_frag_state {
 396         struct event_context *ev;
 397         struct rpc_pipe_client *cli;
 398         struct rpc_hdr_info *prhdr;
 399         prs_struct *pdu;
 400 };
 401 
 402 static void get_complete_frag_got_header(struct async_req *subreq);
 403 static void get_complete_frag_got_rest(struct async_req *subreq);
 404 
 405 static struct async_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
 406                                                struct event_context *ev,
 407                                                struct rpc_pipe_client *cli,
 408                                                struct rpc_hdr_info *prhdr,
 409                                                prs_struct *pdu)
 410 {
 411         struct async_req *result, *subreq;
 412         struct get_complete_frag_state *state;
 413         uint32_t pdu_len;
 414         NTSTATUS status;
 415 
 416         if (!async_req_setup(mem_ctx, &result, &state,
 417                              struct get_complete_frag_state)) {
 418                 return NULL;
 419         }
 420         state->ev = ev;
 421         state->cli = cli;
 422         state->prhdr = prhdr;
 423         state->pdu = pdu;
 424 
 425         pdu_len = prs_data_size(pdu);
 426         if (pdu_len < RPC_HEADER_LEN) {
 427                 if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
 428                         status = NT_STATUS_NO_MEMORY;
 429                         goto post_status;
 430                 }
 431                 subreq = rpc_read_send(
 432                         state, state->ev,
 433                         state->cli->transport,
 434                         (uint8_t *)(prs_data_p(state->pdu) + pdu_len),
 435                         RPC_HEADER_LEN - pdu_len);
 436                 if (subreq == NULL) {
 437                         status = NT_STATUS_NO_MEMORY;
 438                         goto post_status;
 439                 }
 440                 subreq->async.fn = get_complete_frag_got_header;
 441                 subreq->async.priv = result;
 442                 return result;
 443         }
 444 
 445         status = parse_rpc_header(cli, prhdr, pdu);
 446         if (!NT_STATUS_IS_OK(status)) {
 447                 goto post_status;
 448         }
 449 
 450         /*
 451          * Ensure we have frag_len bytes of data.
 452          */
 453         if (pdu_len < prhdr->frag_len) {
 454                 if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
 455                         status = NT_STATUS_NO_MEMORY;
 456                         goto post_status;
 457                 }
 458                 subreq = rpc_read_send(state, state->ev,
 459                                        state->cli->transport,
 460                                        (uint8_t *)(prs_data_p(pdu) + pdu_len),
 461                                        prhdr->frag_len - pdu_len);
 462                 if (subreq == NULL) {
 463                         status = NT_STATUS_NO_MEMORY;
 464                         goto post_status;
 465                 }
 466                 subreq->async.fn = get_complete_frag_got_rest;
 467                 subreq->async.priv = result;
 468                 return result;
 469         }
 470 
 471         status = NT_STATUS_OK;
 472  post_status:
 473         if (async_post_ntstatus(result, ev, status)) {
 474                 return result;
 475         }
 476         TALLOC_FREE(result);
 477         return NULL;
 478 }
 479 
 480 static void get_complete_frag_got_header(struct async_req *subreq)
 481 {
 482         struct async_req *req = talloc_get_type_abort(
 483                 subreq->async.priv, struct async_req);
 484         struct get_complete_frag_state *state = talloc_get_type_abort(
 485                 req->private_data, struct get_complete_frag_state);
 486         NTSTATUS status;
 487 
 488         status = rpc_read_recv(subreq);
 489         TALLOC_FREE(subreq);
 490         if (!NT_STATUS_IS_OK(status)) {
 491                 async_req_nterror(req, status);
 492                 return;
 493         }
 494 
 495         status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
 496         if (!NT_STATUS_IS_OK(status)) {
 497                 async_req_nterror(req, status);
 498                 return;
 499         }
 500 
 501         if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
 502                 async_req_nterror(req, NT_STATUS_NO_MEMORY);
 503                 return;
 504         }
 505 
 506         /*
 507          * We're here in this piece of code because we've read exactly
 508          * RPC_HEADER_LEN bytes into state->pdu.
 509          */
 510 
 511         subreq = rpc_read_send(
 512                 state, state->ev, state->cli->transport,
 513                 (uint8_t *)(prs_data_p(state->pdu) + RPC_HEADER_LEN),
 514                 state->prhdr->frag_len - RPC_HEADER_LEN);
 515         if (async_req_nomem(subreq, req)) {
 516                 return;
 517         }
 518         subreq->async.fn = get_complete_frag_got_rest;
 519         subreq->async.priv = req;
 520 }
 521 
 522 static void get_complete_frag_got_rest(struct async_req *subreq)
 523 {
 524         struct async_req *req = talloc_get_type_abort(
 525                 subreq->async.priv, struct async_req);
 526         NTSTATUS status;
 527 
 528         status = rpc_read_recv(subreq);
 529         TALLOC_FREE(subreq);
 530         if (!NT_STATUS_IS_OK(status)) {
 531                 async_req_nterror(req, status);
 532                 return;
 533         }
 534         async_req_done(req);
 535 }
 536 
 537 static NTSTATUS get_complete_frag_recv(struct async_req *req)
 538 {
 539         return async_req_simple_recv_ntstatus(req);
 540 }
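
/*
 * Concrete example of the two-step fetch above (numbers are illustrative
 * only): the first read pulls exactly RPC_HEADER_LEN bytes, the parsed
 * header reports e.g. frag_len = 1024, so the follow-up read pulls the
 * remaining 1024 - RPC_HEADER_LEN bytes into the grown pdu buffer before
 * the request is marked done.
 */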
 541 
 542 /****************************************************************************
 543  NTLMSSP specific sign/seal.
 544  Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
 545  In fact I should probably abstract these into identical pieces of code... JRA.
 546  ****************************************************************************/
 547 
 548 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
 549                                 prs_struct *current_pdu,
 550                                 uint8 *p_ss_padding_len)
 551 {
 552         RPC_HDR_AUTH auth_info;
 553         uint32 save_offset = prs_offset(current_pdu);
 554         uint32 auth_len = prhdr->auth_len;
 555         NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
 556         unsigned char *data = NULL;
 557         size_t data_len;
 558         unsigned char *full_packet_data = NULL;
 559         size_t full_packet_data_len;
 560         DATA_BLOB auth_blob;
 561         NTSTATUS status;
 562 
 563         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
 564             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
 565                 return NT_STATUS_OK;
 566         }
 567 
 568         if (!ntlmssp_state) {
 569                 return NT_STATUS_INVALID_PARAMETER;
 570         }
 571 
 572         /* Ensure there's enough data for an authenticated response. */
 573         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
 574                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
 575                 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
 576                         (unsigned int)auth_len ));
 577                 return NT_STATUS_BUFFER_TOO_SMALL;
 578         }
 579 
 580         /*
 581          * We need the full packet data + length (minus auth stuff) as well as the packet data + length
 582          * after the RPC header.
 583          * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
 584          * functions as NTLMv2 checks the rpc headers also.
 585          */
 586 
 587         data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
 588         data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
 589 
 590         full_packet_data = (unsigned char *)prs_data_p(current_pdu);
 591         full_packet_data_len = prhdr->frag_len - auth_len;
 592 
 593         /* Pull the auth header and the following data into a blob. */
 594         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
 595                 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
 596                         (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
 597                 return NT_STATUS_BUFFER_TOO_SMALL;
 598         }
 599 
 600         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
 601                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
 602                 return NT_STATUS_BUFFER_TOO_SMALL;
 603         }
 604 
 605         auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
 606         auth_blob.length = auth_len;
 607 
 608         switch (cli->auth->auth_level) {
 609                 case PIPE_AUTH_LEVEL_PRIVACY:
 610                         /* Data is encrypted. */
 611                         status = ntlmssp_unseal_packet(ntlmssp_state,
 612                                                         data, data_len,
 613                                                         full_packet_data,
 614                                                         full_packet_data_len,
 615                                                         &auth_blob);
 616                         if (!NT_STATUS_IS_OK(status)) {
 617                                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
 618                                         "packet from %s. Error was %s.\n",
 619                                         rpccli_pipe_txt(debug_ctx(), cli),
 620                                         nt_errstr(status) ));
 621                                 return status;
 622                         }
 623                         break;
 624                 case PIPE_AUTH_LEVEL_INTEGRITY:
 625                         /* Data is signed. */
 626                         status = ntlmssp_check_packet(ntlmssp_state,
 627                                                         data, data_len,
 628                                                         full_packet_data,
 629                                                         full_packet_data_len,
 630                                                         &auth_blob);
 631                         if (!NT_STATUS_IS_OK(status)) {
 632                                 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
 633                                         "packet from %s. Error was %s.\n",
 634                                         rpccli_pipe_txt(debug_ctx(), cli),
 635                                         nt_errstr(status) ));
 636                                 return status;
 637                         }
 638                         break;
 639                 default:
 640                         DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
 641                                   "auth level %d\n", cli->auth->auth_level));
 642                         return NT_STATUS_INVALID_INFO_CLASS;
 643         }
 644 
 645         /*
 646          * Return the current pointer to the data offset.
 647          */
 648 
 649         if(!prs_set_offset(current_pdu, save_offset)) {
  650                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to set offset back to %u\n",
 651                         (unsigned int)save_offset ));
 652                 return NT_STATUS_BUFFER_TOO_SMALL;
 653         }
 654 
 655         /*
 656          * Remember the padding length. We must remove it from the real data
 657          * stream once the sign/seal is done.
 658          */
 659 
 660         *p_ss_padding_len = auth_info.auth_pad_len;
 661 
 662         return NT_STATUS_OK;
 663 }
 664 
 665 /****************************************************************************
 666  schannel specific sign/seal.
 667  ****************************************************************************/
 668 
 669 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
 670                                 prs_struct *current_pdu,
 671                                 uint8 *p_ss_padding_len)
 672 {
 673         RPC_HDR_AUTH auth_info;
 674         RPC_AUTH_SCHANNEL_CHK schannel_chk;
 675         uint32 auth_len = prhdr->auth_len;
 676         uint32 save_offset = prs_offset(current_pdu);
 677         struct schannel_auth_struct *schannel_auth =
 678                 cli->auth->a_u.schannel_auth;
 679         uint32 data_len;
 680 
 681         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
 682             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
 683                 return NT_STATUS_OK;
 684         }
 685 
 686         if (auth_len < RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
  687                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too small.\n", (unsigned int)auth_len ));
 688                 return NT_STATUS_INVALID_PARAMETER;
 689         }
 690 
 691         if (!schannel_auth) {
 692                 return NT_STATUS_INVALID_PARAMETER;
 693         }
 694 
 695         /* Ensure there's enough data for an authenticated response. */
 696         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
 697                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
 698                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
 699                         (unsigned int)auth_len ));
 700                 return NT_STATUS_INVALID_PARAMETER;
 701         }
 702 
 703         data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
 704 
 705         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
 706                 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
 707                         (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
 708                 return NT_STATUS_BUFFER_TOO_SMALL;
 709         }
 710 
 711         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
 712                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
 713                 return NT_STATUS_BUFFER_TOO_SMALL;
 714         }
 715 
 716         if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
 717                 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
 718                         auth_info.auth_type));
 719                 return NT_STATUS_BUFFER_TOO_SMALL;
 720         }
 721 
 722         if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
 723                                 &schannel_chk, current_pdu, 0)) {
 724                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
 725                 return NT_STATUS_BUFFER_TOO_SMALL;
 726         }
 727 
 728         if (!schannel_decode(schannel_auth,
 729                         cli->auth->auth_level,
 730                         SENDER_IS_ACCEPTOR,
 731                         &schannel_chk,
 732                         prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
 733                         data_len)) {
 734                 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
 735                                 "Connection to %s.\n",
 736                                 rpccli_pipe_txt(debug_ctx(), cli)));
 737                 return NT_STATUS_INVALID_PARAMETER;
 738         }
 739 
 740         /* The sequence number gets incremented on both send and receive. */
 741         schannel_auth->seq_num++;
 742 
 743         /*
 744          * Return the current pointer to the data offset.
 745          */
 746 
 747         if(!prs_set_offset(current_pdu, save_offset)) {
  748                 DEBUG(0,("cli_pipe_verify_schannel: failed to set offset back to %u\n",
 749                         (unsigned int)save_offset ));
 750                 return NT_STATUS_BUFFER_TOO_SMALL;
 751         }
 752 
 753         /*
 754          * Remember the padding length. We must remove it from the real data
 755          * stream once the sign/seal is done.
 756          */
 757 
 758         *p_ss_padding_len = auth_info.auth_pad_len;
 759 
 760         return NT_STATUS_OK;
 761 }
 762 
 763 /****************************************************************************
 764  Do the authentication checks on an incoming pdu. Check sign and unseal etc.
 765  ****************************************************************************/
 766 
 767 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
 768                                 prs_struct *current_pdu,
 769                                 uint8 *p_ss_padding_len)
 770 {
 771         NTSTATUS ret = NT_STATUS_OK;
 772 
  773         /* Paranoia checks for auth_len. */
 774         if (prhdr->auth_len) {
 775                 if (prhdr->auth_len > prhdr->frag_len) {
 776                         return NT_STATUS_INVALID_PARAMETER;
 777                 }
 778 
 779                 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
 780                                 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
 781                         /* Integer wrap attempt. */
 782                         return NT_STATUS_INVALID_PARAMETER;
 783                 }
 784         }
 785 
 786         /*
 787          * Now we have a complete RPC request PDU fragment, try and verify any auth data.
 788          */
 789 
 790         switch(cli->auth->auth_type) {
 791                 case PIPE_AUTH_TYPE_NONE:
 792                         if (prhdr->auth_len) {
 793                                 DEBUG(3, ("cli_pipe_validate_rpc_response: "
 794                                           "Connection to %s - got non-zero "
 795                                           "auth len %u.\n",
 796                                         rpccli_pipe_txt(debug_ctx(), cli),
 797                                         (unsigned int)prhdr->auth_len ));
 798                                 return NT_STATUS_INVALID_PARAMETER;
 799                         }
 800                         break;
 801 
 802                 case PIPE_AUTH_TYPE_NTLMSSP:
 803                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
 804                         ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
 805                         if (!NT_STATUS_IS_OK(ret)) {
 806                                 return ret;
 807                         }
 808                         break;
 809 
 810                 case PIPE_AUTH_TYPE_SCHANNEL:
 811                         ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
 812                         if (!NT_STATUS_IS_OK(ret)) {
 813                                 return ret;
 814                         }
 815                         break;
 816 
 817                 case PIPE_AUTH_TYPE_KRB5:
 818                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
 819                 default:
 820                         DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
 821                                   "to %s - unknown internal auth type %u.\n",
 822                                   rpccli_pipe_txt(debug_ctx(), cli),
 823                                   cli->auth->auth_type ));
 824                         return NT_STATUS_INVALID_INFO_CLASS;
 825         }
 826 
 827         return NT_STATUS_OK;
 828 }
 829 
 830 /****************************************************************************
 831  Do basic authentication checks on an incoming pdu.
 832  ****************************************************************************/
 833 
 834 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
 835                         prs_struct *current_pdu,
 836                         uint8 expected_pkt_type,
 837                         char **ppdata,
 838                         uint32 *pdata_len,
 839                         prs_struct *return_data)
 840 {
 841 
 842         NTSTATUS ret = NT_STATUS_OK;
 843         uint32 current_pdu_len = prs_data_size(current_pdu);
 844 
 845         if (current_pdu_len != prhdr->frag_len) {
 846                 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
 847                         (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
 848                 return NT_STATUS_INVALID_PARAMETER;
 849         }
 850 
 851         /*
 852          * Point the return values at the real data including the RPC
 853          * header. Just in case the caller wants it.
 854          */
 855         *ppdata = prs_data_p(current_pdu);
 856         *pdata_len = current_pdu_len;
 857 
 858         /* Ensure we have the correct type. */
 859         switch (prhdr->pkt_type) {
 860                 case RPC_ALTCONTRESP:
 861                 case RPC_BINDACK:
 862 
 863                         /* Alter context and bind ack share the same packet definitions. */
 864                         break;
 865 
 866 
 867                 case RPC_RESPONSE:
 868                 {
 869                         RPC_HDR_RESP rhdr_resp;
 870                         uint8 ss_padding_len = 0;
 871 
 872                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
 873                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
 874                                 return NT_STATUS_BUFFER_TOO_SMALL;
 875                         }
 876 
 877                         /* Here's where we deal with incoming sign/seal. */
 878                         ret = cli_pipe_validate_rpc_response(cli, prhdr,
 879                                         current_pdu, &ss_padding_len);
 880                         if (!NT_STATUS_IS_OK(ret)) {
 881                                 return ret;
 882                         }
 883 
 884                         /* Point the return values at the NDR data. Remember to remove any ss padding. */
 885                         *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
 886 
 887                         if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
 888                                 return NT_STATUS_BUFFER_TOO_SMALL;
 889                         }
 890 
 891                         *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
 892 
 893                         /* Remember to remove the auth footer. */
 894                         if (prhdr->auth_len) {
 895                                 /* We've already done integer wrap tests on auth_len in
 896                                         cli_pipe_validate_rpc_response(). */
 897                                 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
 898                                         return NT_STATUS_BUFFER_TOO_SMALL;
 899                                 }
 900                                 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
 901                         }
 902 
 903                         DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
 904                                 current_pdu_len, *pdata_len, ss_padding_len ));
 905 
 906                         /*
  907                          * If this is the first reply, and the allocation hint is reasonable, try to
 908                          * set up the return_data parse_struct to the correct size.
 909                          */
 910 
 911                         if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
 912                                 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
 913                                         DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
 914                                                 "too large to allocate\n",
 915                                                 (unsigned int)rhdr_resp.alloc_hint ));
 916                                         return NT_STATUS_NO_MEMORY;
 917                                 }
 918                         }
 919 
 920                         break;
 921                 }
 922 
 923                 case RPC_BINDNACK:
 924                         DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
 925                                   "received from %s!\n",
 926                                   rpccli_pipe_txt(debug_ctx(), cli)));
 927                         /* Use this for now... */
 928                         return NT_STATUS_NETWORK_ACCESS_DENIED;
 929 
 930                 case RPC_FAULT:
 931                 {
 932                         RPC_HDR_RESP rhdr_resp;
 933                         RPC_HDR_FAULT fault_resp;
 934 
 935                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
 936                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
 937                                 return NT_STATUS_BUFFER_TOO_SMALL;
 938                         }
 939 
 940                         if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
 941                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
 942                                 return NT_STATUS_BUFFER_TOO_SMALL;
 943                         }
 944 
 945                         DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
 946                                   "code %s received from %s!\n",
 947                                 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
 948                                 rpccli_pipe_txt(debug_ctx(), cli)));
 949                         if (NT_STATUS_IS_OK(fault_resp.status)) {
 950                                 return NT_STATUS_UNSUCCESSFUL;
 951                         } else {
 952                                 return fault_resp.status;
 953                         }
 954                 }
 955 
 956                 default:
 957                         DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
 958                                 "from %s!\n",
 959                                 (unsigned int)prhdr->pkt_type,
 960                                 rpccli_pipe_txt(debug_ctx(), cli)));
 961                         return NT_STATUS_INVALID_INFO_CLASS;
 962         }
 963 
 964         if (prhdr->pkt_type != expected_pkt_type) {
 965                 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
 966                           "got an unexpected RPC packet type - %u, not %u\n",
 967                         rpccli_pipe_txt(debug_ctx(), cli),
 968                         prhdr->pkt_type,
 969                         expected_pkt_type));
 970                 return NT_STATUS_INVALID_INFO_CLASS;
 971         }
 972 
 973         /* Do this just before return - we don't want to modify any rpc header
 974            data before now as we may have needed to do cryptographic actions on
 975            it before. */
 976 
 977         if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
 978                 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
 979                         "setting fragment first/last ON.\n"));
 980                 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
 981         }
 982 
 983         return NT_STATUS_OK;
 984 }
 985 
 986 /****************************************************************************
 987  Ensure we eat the just processed pdu from the current_pdu prs_struct.
 988  Normally the frag_len and buffer size will match, but on the first trans
 989  reply there is a theoretical chance that buffer size > frag_len, so we must
 990  deal with that.
 991  ****************************************************************************/
 992 
 993 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
 994 {
 995         uint32 current_pdu_len = prs_data_size(current_pdu);
 996 
 997         if (current_pdu_len < prhdr->frag_len) {
 998                 return NT_STATUS_BUFFER_TOO_SMALL;
 999         }
1000 
1001         /* Common case. */
1002         if (current_pdu_len == (uint32)prhdr->frag_len) {
1003                 prs_mem_free(current_pdu);
1004                 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1005                 /* Make current_pdu dynamic with no memory. */
1006                 prs_give_memory(current_pdu, 0, 0, True);
1007                 return NT_STATUS_OK;
1008         }
1009 
1010         /*
1011          * Oh no ! More data in buffer than we processed in current pdu.
1012          * Cheat. Move the data down and shrink the buffer.
1013          */
1014 
1015         memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1016                         current_pdu_len - prhdr->frag_len);
1017 
1018         /* Remember to set the read offset back to zero. */
1019         prs_set_offset(current_pdu, 0);
1020 
1021         /* Shrink the buffer. */
1022         if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1023                 return NT_STATUS_BUFFER_TOO_SMALL;
1024         }
1025 
1026         return NT_STATUS_OK;
1027 }
1028 
1029 /****************************************************************************
 1030  Call a remote api on an arbitrary pipe. Takes param, data and setup buffers.
1031 ****************************************************************************/
1032 
1033 struct cli_api_pipe_state {
1034         struct event_context *ev;
1035         struct rpc_cli_transport *transport;
1036         uint8_t *rdata;
1037         uint32_t rdata_len;
1038 };
1039 
1040 static void cli_api_pipe_trans_done(struct async_req *subreq);
1041 static void cli_api_pipe_write_done(struct async_req *subreq);
1042 static void cli_api_pipe_read_done(struct async_req *subreq);
1043 
1044 static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1045                                            struct event_context *ev,
1046                                            struct rpc_cli_transport *transport,
1047                                            uint8_t *data, size_t data_len,
1048                                            uint32_t max_rdata_len)
1049 {
1050         struct async_req *result, *subreq;
1051         struct cli_api_pipe_state *state;
1052         NTSTATUS status;
1053 
1054         if (!async_req_setup(mem_ctx, &result, &state,
1055                              struct cli_api_pipe_state)) {
1056                 return NULL;
1057         }
1058         state->ev = ev;
1059         state->transport = transport;
1060 
1061         if (max_rdata_len < RPC_HEADER_LEN) {
1062                 /*
 1063                  * For an RPC reply we always need at least RPC_HEADER_LEN
1064                  * bytes. We check this here because we will receive
1065                  * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
1066                  */
1067                 status = NT_STATUS_INVALID_PARAMETER;
1068                 goto post_status;
1069         }
1070 
1071         if (transport->trans_send != NULL) {
1072                 subreq = transport->trans_send(state, ev, data, data_len,
1073                                                max_rdata_len, transport->priv);
1074                 if (subreq == NULL) {
1075                         status = NT_STATUS_NO_MEMORY;
1076                         goto post_status;
1077                 }
1078                 subreq->async.fn = cli_api_pipe_trans_done;
1079                 subreq->async.priv = result;
1080                 return result;
1081         }
1082 
1083         /*
 1084          * If the transport does not provide a "trans" routine, for
 1085          * example the ncacn_ip_tcp transport, do the write/read step here.
1086          */
1087 
1088         subreq = rpc_write_send(state, ev, transport, data, data_len);
1089         if (subreq == NULL) {
1090                 goto fail;
1091         }
1092         subreq->async.fn = cli_api_pipe_write_done;
1093         subreq->async.priv = result;
1094         return result;
1095 
1096         status = NT_STATUS_INVALID_PARAMETER;
1097 
1098  post_status:
1099         if (async_post_ntstatus(result, ev, status)) {
1100                 return result;
1101         }
1102  fail:
1103         TALLOC_FREE(result);
1104         return NULL;
1105 }
1106 
1107 static void cli_api_pipe_trans_done(struct async_req *subreq)
1108 {
1109         struct async_req *req = talloc_get_type_abort(
1110                 subreq->async.priv, struct async_req);
1111         struct cli_api_pipe_state *state = talloc_get_type_abort(
1112                 req->private_data, struct cli_api_pipe_state);
1113         NTSTATUS status;
1114 
1115         status = state->transport->trans_recv(subreq, state, &state->rdata,
1116                                               &state->rdata_len);
1117         TALLOC_FREE(subreq);
1118         if (!NT_STATUS_IS_OK(status)) {
1119                 async_req_nterror(req, status);
1120                 return;
1121         }
1122         async_req_done(req);
1123 }
1124 
1125 static void cli_api_pipe_write_done(struct async_req *subreq)
1126 {
1127         struct async_req *req = talloc_get_type_abort(
1128                 subreq->async.priv, struct async_req);
1129         struct cli_api_pipe_state *state = talloc_get_type_abort(
1130                 req->private_data, struct cli_api_pipe_state);
1131         NTSTATUS status;
1132 
1133         status = rpc_write_recv(subreq);
1134         TALLOC_FREE(subreq);
1135         if (!NT_STATUS_IS_OK(status)) {
1136                 async_req_nterror(req, status);
1137                 return;
1138         }
1139 
1140         state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1141         if (async_req_nomem(state->rdata, req)) {
1142                 return;
1143         }
1144 
1145         /*
 1146          * We don't need to use rpc_read_send here; the upper layer will cope
 1147          * with a short read, and transport->trans_send could also return less
 1148          * than the requested max_rdata_len.
1149          */
1150         subreq = state->transport->read_send(state, state->ev, state->rdata,
1151                                              RPC_HEADER_LEN,
1152                                              state->transport->priv);
1153         if (async_req_nomem(subreq, req)) {
1154                 return;
1155         }
1156         subreq->async.fn = cli_api_pipe_read_done;
1157         subreq->async.priv = req;
1158 }
1159 
1160 static void cli_api_pipe_read_done(struct async_req *subreq)
1161 {
1162         struct async_req *req = talloc_get_type_abort(
1163                 subreq->async.priv, struct async_req);
1164         struct cli_api_pipe_state *state = talloc_get_type_abort(
1165                 req->private_data, struct cli_api_pipe_state);
1166         NTSTATUS status;
1167         ssize_t received;
1168 
1169         status = state->transport->read_recv(subreq, &received);
1170         TALLOC_FREE(subreq);
1171         if (!NT_STATUS_IS_OK(status)) {
1172                 async_req_nterror(req, status);
1173                 return;
1174         }
1175         state->rdata_len = received;
1176         async_req_done(req);
1177 }
1178 
1179 static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1180                                   uint8_t **prdata, uint32_t *prdata_len)
1181 {
1182         struct cli_api_pipe_state *state = talloc_get_type_abort(
1183                 req->private_data, struct cli_api_pipe_state);
1184         NTSTATUS status;
1185 
1186         if (async_req_is_nterror(req, &status)) {
1187                 return status;
1188         }
1189 
1190         *prdata = talloc_move(mem_ctx, &state->rdata);
1191         *prdata_len = state->rdata_len;
1192         return NT_STATUS_OK;
1193 }
1194 
1195 /****************************************************************************
1196  Send data on an rpc pipe via trans. The prs_struct data must be the last
1197  pdu fragment of an NDR data stream.
1198 
1199  Receive response data from an rpc pipe, which may be large...
1200 
 1201  Read the first fragment: unfortunately we have to use SMBtrans for the
 1202  first bit, then SMBreadX for subsequent bits.
 1203 
 1204  If the first fragment received wasn't also the last fragment, continue
 1205  getting fragments until we _do_ receive the last fragment.
 1206 
 1207  Request/Response PDUs look like the following...
1208 
1209  |<------------------PDU len----------------------------------------------->|
1210  |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1211 
1212  +------------+-----------------+-------------+---------------+-------------+
1213  | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR      | AUTH DATA   |
1214  +------------+-----------------+-------------+---------------+-------------+
1215 
1216  The presence of the AUTH_HDR and AUTH DATA depends on whether signing
1217  and/or sealing has been negotiated.
1218 
1219  ****************************************************************************/
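     /*
      * A worked fragment layout for illustration, assuming the usual header
      * sizes used in this file (RPC_HEADER_LEN 16, REQ/RESP header 8,
      * RPC_HDR_AUTH_LEN 8) and a 16 byte NTLMSSP signature:
      *
      *   |<------------------- frag_len = 248 ---------------------->|
      *   | 16 hdr | 8 req/resp hdr | 196 data | 4 pad | 8 auth hdr | 16 sig |
      *
      * The pad brings the data portion up to a multiple of 8 so the auth
      * trailer is aligned; without negotiated auth the fragment simply
      * ends after the data.
      */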
1220 
1221 struct rpc_api_pipe_state {
1222         struct event_context *ev;
1223         struct rpc_pipe_client *cli;
1224         uint8_t expected_pkt_type;
1225 
1226         prs_struct incoming_frag;
1227         struct rpc_hdr_info rhdr;
1228 
1229         prs_struct incoming_pdu;        /* Incoming reply */
1230         uint32_t incoming_pdu_offset;
1231 };
1232 
1233 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1234 {
1235         prs_mem_free(&state->incoming_frag);
1236         prs_mem_free(&state->incoming_pdu);
1237         return 0;
1238 }
1239 
1240 static void rpc_api_pipe_trans_done(struct async_req *subreq);
1241 static void rpc_api_pipe_got_pdu(struct async_req *subreq);
1242 
1243 static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
1244                                            struct event_context *ev,
1245                                            struct rpc_pipe_client *cli,
1246                                            prs_struct *data, /* Outgoing PDU */
1247                                            uint8_t expected_pkt_type)
1248 {
1249         struct async_req *result, *subreq;
1250         struct rpc_api_pipe_state *state;
1251         uint16_t max_recv_frag;
1252         NTSTATUS status;
1253 
1254         if (!async_req_setup(mem_ctx, &result, &state,
1255                              struct rpc_api_pipe_state)) {
1256                 return NULL;
1257         }
1258         state->ev = ev;
1259         state->cli = cli;
1260         state->expected_pkt_type = expected_pkt_type;
1261         state->incoming_pdu_offset = 0;
1262 
1263         prs_init_empty(&state->incoming_frag, state, UNMARSHALL);
1264 
1265         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1266         /* Make incoming_pdu dynamic with no memory. */
1267         prs_give_memory(&state->incoming_pdu, NULL, 0, true);
1268 
1269         talloc_set_destructor(state, rpc_api_pipe_state_destructor);
1270 
1271         /*
1272          * Ensure we're not sending too much.
1273          */
1274         if (prs_offset(data) > cli->max_xmit_frag) {
1275                 status = NT_STATUS_INVALID_PARAMETER;
1276                 goto post_status;
1277         }
1278 
1279         DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));
1280 
1281         max_recv_frag = cli->max_recv_frag;
1282 
1283 #if 0
1284         max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
1285 #endif
1286 
1287         subreq = cli_api_pipe_send(state, ev, cli->transport,
1288                                    (uint8_t *)prs_data_p(data),
1289                                    prs_offset(data), max_recv_frag);
1290         if (subreq == NULL) {
1291                 status = NT_STATUS_NO_MEMORY;
1292                 goto post_status;
1293         }
1294         subreq->async.fn = rpc_api_pipe_trans_done;
1295         subreq->async.priv = result;
1296         return result;
1297 
1298  post_status:
1299         if (async_post_ntstatus(result, ev, status)) {
1300                 return result;
1301         }
1302         TALLOC_FREE(result);
1303         return NULL;
1304 }
1305 
1306 static void rpc_api_pipe_trans_done(struct async_req *subreq)
1307 {
1308         struct async_req *req = talloc_get_type_abort(
1309                 subreq->async.priv, struct async_req);
1310         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1311                 req->private_data, struct rpc_api_pipe_state);
1312         NTSTATUS status;
1313         uint8_t *rdata = NULL;
1314         uint32_t rdata_len = 0;
1315         char *rdata_copy;
1316 
1317         status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
1318         TALLOC_FREE(subreq);
1319         if (!NT_STATUS_IS_OK(status)) {
1320                 DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
1321                 async_req_nterror(req, status);
1322                 return;
1323         }
1324 
1325         if (rdata == NULL) {
1326                 DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
1327                          rpccli_pipe_txt(debug_ctx(), state->cli)));
1328                 async_req_done(req);
1329                 return;
1330         }
1331 
1332         /*
1333          * Hand the memory received from cli_trans to the current pdu as
1334          * dynamic data. Having to duplicate it sucks, but prs_struct
1335          * doesn't know about talloc :-(
1336          */
1337         rdata_copy = (char *)memdup(rdata, rdata_len);
1338         TALLOC_FREE(rdata);
1339         if (async_req_nomem(rdata_copy, req)) {
1340                 return;
1341         }
1342         prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);
1343 
1344         /* Ensure we have enough data for a pdu. */
1345         subreq = get_complete_frag_send(state, state->ev, state->cli,
1346                                         &state->rhdr, &state->incoming_frag);
1347         if (async_req_nomem(subreq, req)) {
1348                 return;
1349         }
1350         subreq->async.fn = rpc_api_pipe_got_pdu;
1351         subreq->async.priv = req;
1352 }
1353 
1354 static void rpc_api_pipe_got_pdu(struct async_req *subreq)
1355 {
1356         struct async_req *req = talloc_get_type_abort(
1357                 subreq->async.priv, struct async_req);
1358         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1359                 req->private_data, struct rpc_api_pipe_state);
1360         NTSTATUS status;
1361         char *rdata = NULL;
1362         uint32_t rdata_len = 0;
1363 
1364         status = get_complete_frag_recv(subreq);
1365         TALLOC_FREE(subreq);
1366         if (!NT_STATUS_IS_OK(status)) {
1367                 DEBUG(5, ("get_complete_frag failed: %s\n",
1368                           nt_errstr(status)));
1369                 async_req_nterror(req, status);
1370                 return;
1371         }
1372 
1373         status = cli_pipe_validate_current_pdu(
1374                 state->cli, &state->rhdr, &state->incoming_frag,
1375                 state->expected_pkt_type, &rdata, &rdata_len,
1376                 &state->incoming_pdu);
1377 
1378         DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
1379                   (unsigned)prs_data_size(&state->incoming_frag),
1380                   (unsigned)state->incoming_pdu_offset,
1381                   nt_errstr(status)));
1382 
1383         if (!NT_STATUS_IS_OK(status)) {
1384                 async_req_nterror(req, status);
1385                 return;
1386         }
1387 
1388         if ((state->rhdr.flags & RPC_FLG_FIRST)
1389             && (state->rhdr.pack_type[0] == 0)) {
1390                 /*
1391                  * Set the data type correctly for big-endian data on the
1392                  * first packet.
1393                  */
1394                 DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
1395                           "big-endian.\n",
1396                           rpccli_pipe_txt(debug_ctx(), state->cli)));
1397                 prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
1398         }
1399         /*
1400          * Check endianness on subsequent packets.
1401          */
1402         if (state->incoming_frag.bigendian_data
1403             != state->incoming_pdu.bigendian_data) {
1404                 DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
1405                          "%s\n",
1406                          state->incoming_pdu.bigendian_data?"big":"little",
1407                          state->incoming_frag.bigendian_data?"big":"little"));
1408                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
1409                 return;
1410         }
1411 
1412         /* Now copy the data portion out of the pdu into rbuf. */
1413         if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
1414                 async_req_nterror(req, NT_STATUS_NO_MEMORY);
1415                 return;
1416         }
1417 
1418         memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
1419                rdata, (size_t)rdata_len);
1420         state->incoming_pdu_offset += rdata_len;
1421 
1422         status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
1423                                             &state->incoming_frag);
1424         if (!NT_STATUS_IS_OK(status)) {
1425                 async_req_nterror(req, status);
1426                 return;
1427         }
1428 
1429         if (state->rhdr.flags & RPC_FLG_LAST) {
1430                 DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
1431                           rpccli_pipe_txt(debug_ctx(), state->cli),
1432                           (unsigned)prs_data_size(&state->incoming_pdu)));
1433                 async_req_done(req);
1434                 return;
1435         }
1436 
1437         subreq = get_complete_frag_send(state, state->ev, state->cli,
1438                                         &state->rhdr, &state->incoming_frag);
1439         if (async_req_nomem(subreq, req)) {
1440                 return;
1441         }
1442         subreq->async.fn = rpc_api_pipe_got_pdu;
1443         subreq->async.priv = req;
1444 }
1445 
1446 static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1447                                   prs_struct *reply_pdu)
1448 {
1449         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1450                 req->private_data, struct rpc_api_pipe_state);
1451         NTSTATUS status;
1452 
1453         if (async_req_is_nterror(req, &status)) {
1454                 return status;
1455         }
1456 
1457         *reply_pdu = state->incoming_pdu;
1458         reply_pdu->mem_ctx = mem_ctx;
1459 
1460         /*
1461          * Prevent state->incoming_pdu from being freed in
1462          * rpc_api_pipe_state_destructor()
1463          */
1464         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1465 
1466         return NT_STATUS_OK;
1467 }
1468 
1469 /*******************************************************************
1470  Creates krb5 auth bind.
1471  ********************************************************************/
1472 
1473 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1474                                                 enum pipe_auth_level auth_level,
1475                                                 RPC_HDR_AUTH *pauth_out,
1476                                                 prs_struct *auth_data)
1477 {
1478 #ifdef HAVE_KRB5
1479         int ret;
1480         struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1481         DATA_BLOB tkt = data_blob_null;
1482         DATA_BLOB tkt_wrapped = data_blob_null;
1483 
1484         /* We may change the pad length before marshalling. */
1485         init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1486 
1487         DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1488                 a->service_principal ));
1489 
1490         /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1491 
1492         ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1493                         &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1494 
1495         if (ret) {
1496                 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1497                         "failed with %s\n",
1498                         a->service_principal,
1499                         error_message(ret) ));
1500 
1501                 data_blob_free(&tkt);
1502                 prs_mem_free(auth_data);
1503                 return NT_STATUS_INVALID_PARAMETER;
1504         }
1505 
1506         /* wrap that up in a nice GSS-API wrapping */
1507         tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
1508 
1509         data_blob_free(&tkt);
1510 
1511         /* Auth len in the rpc header doesn't include auth_header. */
1512         if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1513                 data_blob_free(&tkt_wrapped);
1514                 prs_mem_free(auth_data);
1515                 return NT_STATUS_NO_MEMORY;
1516         }
1517 
1518         DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1519         dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1520 
1521         data_blob_free(&tkt_wrapped);
1522         return NT_STATUS_OK;
1523 #else
1524         return NT_STATUS_INVALID_PARAMETER;
1525 #endif
1526 }
1527 
1528 /*******************************************************************
1529  Creates SPNEGO NTLMSSP auth bind.
1530  ********************************************************************/
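     /*
      * Rough sketch of what this builds, based on the calls below: the
      * initial NTLMSSP NEGOTIATE token from ntlmssp_update() is wrapped in
      * a SPNEGO negTokenInit naming OID_NTLMSSP as the mechanism, and that
      * wrapped blob becomes the auth trailer of the outgoing bind PDU.
      */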
1531 
1532 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1533                                                 enum pipe_auth_level auth_level,
1534                                                 RPC_HDR_AUTH *pauth_out,
1535                                                 prs_struct *auth_data)
1536 {
1537         NTSTATUS nt_status;
1538         DATA_BLOB null_blob = data_blob_null;
1539         DATA_BLOB request = data_blob_null;
1540         DATA_BLOB spnego_msg = data_blob_null;
1541 
1542         /* We may change the pad length before marshalling. */
1543         init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1544 
1545         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1546         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1547                                         null_blob,
1548                                         &request);
1549 
1550         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1551                 data_blob_free(&request);
1552                 prs_mem_free(auth_data);
1553                 return nt_status;
1554         }
1555 
1556         /* Wrap this in SPNEGO. */
1557         spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1558 
1559         data_blob_free(&request);
1560 
1561         /* Auth len in the rpc header doesn't include auth_header. */
1562         if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1563                 data_blob_free(&spnego_msg);
1564                 prs_mem_free(auth_data);
1565                 return NT_STATUS_NO_MEMORY;
1566         }
1567 
1568         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1569         dump_data(5, spnego_msg.data, spnego_msg.length);
1570 
1571         data_blob_free(&spnego_msg);
1572         return NT_STATUS_OK;
1573 }
1574 
1575 /*******************************************************************
1576  Creates NTLMSSP auth bind.
1577  ********************************************************************/
1578 
1579 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1580                                                 enum pipe_auth_level auth_level,
1581                                                 RPC_HDR_AUTH *pauth_out,
1582                                                 prs_struct *auth_data)
1583 {
1584         NTSTATUS nt_status;
1585         DATA_BLOB null_blob = data_blob_null;
1586         DATA_BLOB request = data_blob_null;
1587 
1588         /* We may change the pad length before marshalling. */
1589         init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1590 
1591         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1592         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1593                                         null_blob,
1594                                         &request);
1595 
1596         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1597                 data_blob_free(&request);
1598                 prs_mem_free(auth_data);
1599                 return nt_status;
1600         }
1601 
1602         /* Auth len in the rpc header doesn't include auth_header. */
1603         if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1604                 data_blob_free(&request);
1605                 prs_mem_free(auth_data);
1606                 return NT_STATUS_NO_MEMORY;
1607         }
1608 
1609         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1610         dump_data(5, request.data, request.length);
1611 
1612         data_blob_free(&request);
1613         return NT_STATUS_OK;
1614 }
1615 
1616 /*******************************************************************
1617  Creates schannel auth bind.
1618  ********************************************************************/
1619 
1620 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1621                                                 enum pipe_auth_level auth_level,
1622                                                 RPC_HDR_AUTH *pauth_out,
1623                                                 prs_struct *auth_data)
1624 {
1625         RPC_AUTH_SCHANNEL_NEG schannel_neg;
1626 
1627         /* We may change the pad length before marshalling. */
1628         init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1629 
1630         /* Use lp_workgroup() if domain not specified */
1631 
1632         if (!cli->auth->domain || !cli->auth->domain[0]) {
1633                 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1634                 if (cli->auth->domain == NULL) {
1635                         return NT_STATUS_NO_MEMORY;
1636                 }
1637         }
1638 
1639         init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1640                                    global_myname());
1641 
1642         /*
1643          * Now marshall the data into the auth parse_struct.
1644          */
1645 
1646         if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1647                                        &schannel_neg, auth_data, 0)) {
1648                 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1649                 prs_mem_free(auth_data);
1650                 return NT_STATUS_NO_MEMORY;
1651         }
1652 
1653         return NT_STATUS_OK;
1654 }
1655 
1656 /*******************************************************************
1657  Creates the internals of a DCE/RPC bind request or alter context PDU.
1658  ********************************************************************/
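     /*
      * A minimal worked example of the length math below: if
      * RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb) came to 68 bytes and an
      * auth trailer is present, then ss_padding_len = 8 - (68 % 8) = 4 so
      * the RPC_HDR_AUTH starts on an 8-byte boundary, and
      * frag_len = 68 + 4 + RPC_HDR_AUTH_LEN + auth_len.
      */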
1659 
1660 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1661                                                 prs_struct *rpc_out, 
1662                                                 uint32 rpc_call_id,
1663                                                 const RPC_IFACE *abstract,
1664                                                 const RPC_IFACE *transfer,
1665                                                 RPC_HDR_AUTH *phdr_auth,
1666                                                 prs_struct *pauth_info)
1667 {
1668         RPC_HDR hdr;
1669         RPC_HDR_RB hdr_rb;
1670         RPC_CONTEXT rpc_ctx;
1671         uint16 auth_len = prs_offset(pauth_info);
1672         uint8 ss_padding_len = 0;
1673         uint16 frag_len = 0;
1674 
1675         /* create the RPC context. */
1676         init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1677 
1678         /* create the bind request RPC_HDR_RB */
1679         init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1680 
1681         /* Start building the frag length. */
1682         frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1683 
1684         /* Do we need to pad ? */
1685         if (auth_len) {
1686                 uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1687                 if (data_len % 8) {
1688                         ss_padding_len = 8 - (data_len % 8);
1689                         phdr_auth->auth_pad_len = ss_padding_len;
1690                 }
1691                 frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
1692         }
1693 
1694         /* Create the request RPC_HDR */
1695         init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1696 
1697         /* Marshall the RPC header */
1698         if(!smb_io_rpc_hdr("hdr"   , &hdr, rpc_out, 0)) {
1699                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1700                 return NT_STATUS_NO_MEMORY;
1701         }
1702 
1703         /* Marshall the bind request data */
1704         if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1705                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1706                 return NT_STATUS_NO_MEMORY;
1707         }
1708 
1709         /*
1710          * Grow the outgoing buffer to store any auth info.
1711          */
1712 
1713         if(auth_len != 0) {
1714                 if (ss_padding_len) {
1715                         char pad[8];
1716                         memset(pad, '\0', 8);
1717                         if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1718                                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1719                                 return NT_STATUS_NO_MEMORY;
1720                         }
1721                 }
1722 
1723                 if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1724                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1725                         return NT_STATUS_NO_MEMORY;
1726                 }
1727 
1728 
1729                 if(!prs_append_prs_data( rpc_out, pauth_info)) {
1730                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1731                         return NT_STATUS_NO_MEMORY;
1732                 }
1733         }
1734 
1735         return NT_STATUS_OK;
1736 }
1737 
1738 /*******************************************************************
1739  Creates a DCE/RPC bind request.
1740  ********************************************************************/
1741 
1742 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1743                                 prs_struct *rpc_out, 
1744                                 uint32 rpc_call_id,
1745                                 const RPC_IFACE *abstract,
1746                                 const RPC_IFACE *transfer,
1747                                 enum pipe_auth_type auth_type,
1748                                 enum pipe_auth_level auth_level)
1749 {
1750         RPC_HDR_AUTH hdr_auth;
1751         prs_struct auth_info;
1752         NTSTATUS ret = NT_STATUS_OK;
1753 
1754         ZERO_STRUCT(hdr_auth);
1755         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1756                 return NT_STATUS_NO_MEMORY;
1757 
1758         switch (auth_type) {
1759                 case PIPE_AUTH_TYPE_SCHANNEL:
1760                         ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1761                         if (!NT_STATUS_IS_OK(ret)) {
1762                                 prs_mem_free(&auth_info);
1763                                 return ret;
1764                         }
1765                         break;
1766 
1767                 case PIPE_AUTH_TYPE_NTLMSSP:
1768                         ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1769                         if (!NT_STATUS_IS_OK(ret)) {
1770                                 prs_mem_free(&auth_info);
1771                                 return ret;
1772                         }
1773                         break;
1774 
1775                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1776                         ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1777                         if (!NT_STATUS_IS_OK(ret)) {
1778                                 prs_mem_free(&auth_info);
1779                                 return ret;
1780                         }
1781                         break;
1782 
1783                 case PIPE_AUTH_TYPE_KRB5:
1784                         ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1785                         if (!NT_STATUS_IS_OK(ret)) {
1786                                 prs_mem_free(&auth_info);
1787                                 return ret;
1788                         }
1789                         break;
1790 
1791                 case PIPE_AUTH_TYPE_NONE:
1792                         break;
1793 
1794                 default:
1795                         /* "Can't" happen. */
1796                         return NT_STATUS_INVALID_INFO_CLASS;
1797         }
1798 
1799         ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1800                                                 rpc_out, 
1801                                                 rpc_call_id,
1802                                                 abstract,
1803                                                 transfer,
1804                                                 &hdr_auth,
1805                                                 &auth_info);
1806 
1807         prs_mem_free(&auth_info);
1808         return ret;
1809 }
1810 
1811 /*******************************************************************
1812  Create and add the NTLMSSP sign/seal auth header and data.
1813  ********************************************************************/
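     /*
      * Rough picture of what this appends to outgoing_pdu (the sign/seal
      * padding itself was already copied in by the caller):
      *
      *   ... data ... | ss pad | RPC_HDR_AUTH (8) | NTLMSSP sig (16)
      *
      * At PIPE_AUTH_LEVEL_PRIVACY the data-plus-pad region is sealed in
      * place before the signature is appended; at INTEGRITY it is only
      * signed.
      */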
1814 
1815 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1816                                         RPC_HDR *phdr,
1817                                         uint32 ss_padding_len,
1818                                         prs_struct *outgoing_pdu)
1819 {
1820         RPC_HDR_AUTH auth_info;
1821         NTSTATUS status;
1822         DATA_BLOB auth_blob = data_blob_null;
1823         uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1824 
1825         if (!cli->auth->a_u.ntlmssp_state) {
1826                 return NT_STATUS_INVALID_PARAMETER;
1827         }
1828 
1829         /* Init and marshall the auth header. */
1830         init_rpc_hdr_auth(&auth_info,
1831                         map_pipe_auth_type_to_rpc_auth_type(
1832                                 cli->auth->auth_type),
1833                         cli->auth->auth_level,
1834                         ss_padding_len,
1835                         1 /* context id. */);
1836 
1837         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1838                 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1839                 data_blob_free(&auth_blob);
1840                 return NT_STATUS_NO_MEMORY;
1841         }
1842 
1843         switch (cli->auth->auth_level) {
1844                 case PIPE_AUTH_LEVEL_PRIVACY:
1845                         /* Data portion is encrypted. */
1846                         status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1847                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1848                                         data_and_pad_len,
1849                                         (unsigned char *)prs_data_p(outgoing_pdu),
1850                                         (size_t)prs_offset(outgoing_pdu),
1851                                         &auth_blob);
1852                         if (!NT_STATUS_IS_OK(status)) {
1853                                 data_blob_free(&auth_blob);
1854                                 return status;
1855                         }
1856                         break;
1857 
1858                 case PIPE_AUTH_LEVEL_INTEGRITY:
1859                         /* Data is signed. */
1860                         status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1861                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1862                                         data_and_pad_len,
1863                                         (unsigned char *)prs_data_p(outgoing_pdu),
1864                                         (size_t)prs_offset(outgoing_pdu),
1865                                         &auth_blob);
1866                         if (!NT_STATUS_IS_OK(status)) {
1867                                 data_blob_free(&auth_blob);
1868                                 return status;
1869                         }
1870                         break;
1871 
1872                 default:
1873                         /* Can't happen. */
1874                         smb_panic("bad auth level");
1875                         /* Notreached. */
1876                         return NT_STATUS_INVALID_PARAMETER;
1877         }
1878 
1879         /* Finally marshall the blob. */
1880 
1881         if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
1882                 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
1883                         (unsigned int)NTLMSSP_SIG_SIZE));
1884                 data_blob_free(&auth_blob);
1885                 return NT_STATUS_NO_MEMORY;
1886         }
1887 
1888         data_blob_free(&auth_blob);
1889         return NT_STATUS_OK;
1890 }
1891 
1892 /*******************************************************************
1893  Create and add the schannel sign/seal auth header and data.
1894  ********************************************************************/
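     /*
      * Same shape as the NTLMSSP footer above, but the trailer is the
      * verifier produced by schannel_encode():
      *
      *   ... data ... | ss pad | RPC_HDR_AUTH (8) | schannel chk (32)
      *
      * where 32 is RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN. seq_num is
      * bumped once per outgoing fragment.
      */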
1895 
1896 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1897                                         RPC_HDR *phdr,
1898                                         uint32 ss_padding_len,
1899                                         prs_struct *outgoing_pdu)
1900 {
1901         RPC_HDR_AUTH auth_info;
1902         RPC_AUTH_SCHANNEL_CHK verf;
1903         struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
1904         char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1905         size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1906 
1907         if (!sas) {
1908                 return NT_STATUS_INVALID_PARAMETER;
1909         }
1910 
1911         /* Init and marshall the auth header. */
1912         init_rpc_hdr_auth(&auth_info,
1913                         map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
1914                         cli->auth->auth_level,
1915                         ss_padding_len,
1916                         1 /* context id. */);
1917 
1918         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1919                 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1920                 return NT_STATUS_NO_MEMORY;
1921         }
1922 
1923         switch (cli->auth->auth_level) {
1924                 case PIPE_AUTH_LEVEL_PRIVACY:
1925                 case PIPE_AUTH_LEVEL_INTEGRITY:
1926                         DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
1927                                 sas->seq_num));
1928 
1929                         schannel_encode(sas,
1930                                         cli->auth->auth_level,
1931                                         SENDER_IS_INITIATOR,
1932                                         &verf,
1933                                         data_p,
1934                                         data_and_pad_len);
1935 
1936                         sas->seq_num++;
1937                         break;
1938 
1939                 default:
1940                         /* Can't happen. */
1941                         smb_panic("bad auth level");
1942                         /* Notreached. */
1943                         return NT_STATUS_INVALID_PARAMETER;
1944         }
1945 
1946         /* Finally marshall the blob. */
1947         smb_io_rpc_auth_schannel_chk("",
1948                         RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
1949                         &verf,
1950                         outgoing_pdu,
1951                         0);
1952 
1953         return NT_STATUS_OK;
1954 }
1955 
1956 /*******************************************************************
1957  Calculate how much data we're going to send in this packet, and
1958  work out any sign/seal padding length.
1959  ********************************************************************/
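     /*
      * Worked example, assuming max_xmit_frag = 4280 and NTLMSSP at
      * integrity level: data_space = 4280 - 16 - 8 - 8 - 16 = 4232 bytes.
      * With 100 bytes of request data left, data_len = 100,
      * ss_padding = 8 - (100 % 8) = 4 and
      * frag_len = 16 + 8 + 100 + 4 + 8 + 16 = 152.
      */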
1960 
1961 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
1962                                         uint32 data_left,
1963                                         uint16 *p_frag_len,
1964                                         uint16 *p_auth_len,
1965                                         uint32 *p_ss_padding)
1966 {
1967         uint32 data_space, data_len;
1968 
1969 #if 0
1970         if ((data_left > 0) && (sys_random() % 2)) {
1971                 data_left = MAX(data_left/2, 1);
1972         }
1973 #endif
1974 
1975         switch (cli->auth->auth_level) {
1976                 case PIPE_AUTH_LEVEL_NONE:
1977                 case PIPE_AUTH_LEVEL_CONNECT:
1978                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
1979                         data_len = MIN(data_space, data_left);
1980                         *p_ss_padding = 0;
1981                         *p_auth_len = 0;
1982                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
1983                         return data_len;
1984 
1985                 case PIPE_AUTH_LEVEL_INTEGRITY:
1986                 case PIPE_AUTH_LEVEL_PRIVACY:
1987                         /* Treat the same for all authenticated rpc requests. */
1988                         switch(cli->auth->auth_type) {
1989                                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1990                                 case PIPE_AUTH_TYPE_NTLMSSP:
1991                                         *p_auth_len = NTLMSSP_SIG_SIZE;
1992                                         break;
1993                                 case PIPE_AUTH_TYPE_SCHANNEL:
1994                                         *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
1995                                         break;
1996                                 default:
1997                                         smb_panic("bad auth type");
1998                                         break;
1999                         }
2000 
2001                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
2002                                                 RPC_HDR_AUTH_LEN - *p_auth_len;
2003 
2004                         data_len = MIN(data_space, data_left);
2005                         *p_ss_padding = 0;
2006                         if (data_len % 8) {
2007                                 *p_ss_padding = 8 - (data_len % 8);
2008                         }
2009                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN +                /* Normal headers. */
2010                                         data_len + *p_ss_padding +              /* data plus padding. */
2011                                         RPC_HDR_AUTH_LEN + *p_auth_len;         /* Auth header and auth data. */
2012                         return data_len;
2013 
2014                 default:
2015                         smb_panic("bad auth level");
2016                         /* Notreached. */
2017                         return 0;
2018         }
2019 }
2020 
2021 /*******************************************************************
2022  External interface.
2023  Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2024  The reply is NDR encoded in out_data. Splits the data stream into RPC
2025  PDUs and deals with the signing/sealing details.
2026  ********************************************************************/
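     /*
      * Minimal usage sketch for the async interface; this mirrors what the
      * synchronous rpc_api_pipe_req() wrapper further down does:
      *
      *   req = rpc_api_pipe_req_send(mem_ctx, ev, cli, op_num, &in_data);
      *   while (req->state < ASYNC_REQ_DONE) {
      *           event_loop_once(ev);
      *   }
      *   status = rpc_api_pipe_req_recv(req, mem_ctx, &out_data);
      *
      * The request is split into fragments of at most max_xmit_frag bytes;
      * all but the last are pushed out with rpc_write_send(), and the last
      * one goes through rpc_api_pipe_send(), which also collects the reply
      * PDU.
      */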
2027 
2028 struct rpc_api_pipe_req_state {
2029         struct event_context *ev;
2030         struct rpc_pipe_client *cli;
2031         uint8_t op_num;
2032         uint32_t call_id;
2033         prs_struct *req_data;
2034         uint32_t req_data_sent;
2035         prs_struct outgoing_frag;
2036         prs_struct reply_pdu;
2037 };
2038 
2039 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2040 {
2041         prs_mem_free(&s->outgoing_frag);
2042         prs_mem_free(&s->reply_pdu);
2043         return 0;
2044 }
2045 
2046 static void rpc_api_pipe_req_write_done(struct async_req *subreq);
2047 static void rpc_api_pipe_req_done(struct async_req *subreq);
2048 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2049                                   bool *is_last_frag);
2050 
2051 struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
2052                                         struct event_context *ev,
2053                                         struct rpc_pipe_client *cli,
2054                                         uint8_t op_num,
2055                                         prs_struct *req_data)
2056 {
2057         struct async_req *result, *subreq;
2058         struct rpc_api_pipe_req_state *state;
2059         NTSTATUS status;
2060         bool is_last_frag;
2061 
2062         if (!async_req_setup(mem_ctx, &result, &state,
2063                              struct rpc_api_pipe_req_state)) {
2064                 return NULL;
2065         }
2066         state->ev = ev;
2067         state->cli = cli;
2068         state->op_num = op_num;
2069         state->req_data = req_data;
2070         state->req_data_sent = 0;
2071         state->call_id = get_rpc_call_id();
2072 
2073         if (cli->max_xmit_frag
2074             < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
2075                 /* Server is screwed up ! */
2076                 status = NT_STATUS_INVALID_PARAMETER;
2077                 goto post_status;
2078         }
2079 
2080         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2081 
2082         if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
2083                       state, MARSHALL)) {
2084                 status = NT_STATUS_NO_MEMORY;
2085                 goto post_status;
2086         }
2087 
2088         talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);
2089 
2090         status = prepare_next_frag(state, &is_last_frag);
2091         if (!NT_STATUS_IS_OK(status)) {
2092                 goto post_status;
2093         }
2094 
2095         if (is_last_frag) {
2096                 subreq = rpc_api_pipe_send(state, ev, state->cli,
2097                                            &state->outgoing_frag,
2098                                            RPC_RESPONSE);
2099                 if (subreq == NULL) {
2100                         status = NT_STATUS_NO_MEMORY;
2101                         goto post_status;
2102                 }
2103                 subreq->async.fn = rpc_api_pipe_req_done;
2104                 subreq->async.priv = result;
2105         } else {
2106                 subreq = rpc_write_send(
2107                         state, ev, cli->transport,
2108                         (uint8_t *)prs_data_p(&state->outgoing_frag),
2109                         prs_offset(&state->outgoing_frag));
2110                 if (subreq == NULL) {
2111                         status = NT_STATUS_NO_MEMORY;
2112                         goto post_status;
2113                 }
2114                 subreq->async.fn = rpc_api_pipe_req_write_done;
2115                 subreq->async.priv = result;
2116         }
2117         return result;
2118 
2119  post_status:
2120         if (async_post_ntstatus(result, ev, status)) {
2121                 return result;
2122         }
2123         TALLOC_FREE(result);
2124         return NULL;
2125 }
2126 
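     /*******************************************************************
      Marshall the next request fragment into state->outgoing_frag: the RPC
      header, the request header, the next slice of state->req_data, any
      sign/seal padding and the auth footer. Advances req_data_sent and
      sets *is_last_frag when the fragment carries RPC_FLG_LAST.
      ********************************************************************/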
2127 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2128                                   bool *is_last_frag)
2129 {
2130         RPC_HDR hdr;
2131         RPC_HDR_REQ hdr_req;
2132         uint32_t data_sent_thistime;
2133         uint16_t auth_len;
2134         uint16_t frag_len;
2135         uint8_t flags = 0;
2136         uint32_t ss_padding;
2137         uint32_t data_left;
2138         char pad[8] = { 0, };
2139         NTSTATUS status;
2140 
2141         data_left = prs_offset(state->req_data) - state->req_data_sent;
2142 
2143         data_sent_thistime = calculate_data_len_tosend(
2144                 state->cli, data_left, &frag_len, &auth_len, &ss_padding);
2145 
2146         if (state->req_data_sent == 0) {
2147                 flags = RPC_FLG_FIRST;
2148         }
2149 
2150         if (data_sent_thistime == data_left) {
2151                 flags |= RPC_FLG_LAST;
2152         }
2153 
2154         if (!prs_set_offset(&state->outgoing_frag, 0)) {
2155                 return NT_STATUS_NO_MEMORY;
2156         }
2157 
2158         /* Create and marshall the header and request header. */
2159         init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
2160                      auth_len);
2161 
2162         if (!smb_io_rpc_hdr("hdr    ", &hdr, &state->outgoing_frag, 0)) {
2163                 return NT_STATUS_NO_MEMORY;
2164         }
2165 
2166         /* Create the rpc request RPC_HDR_REQ */
2167         init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
2168                          state->op_num);
2169 
2170         if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
2171                                 &state->outgoing_frag, 0)) {
2172                 return NT_STATUS_NO_MEMORY;
2173         }
2174 
2175         /* Copy in the data, plus any ss padding. */
2176         if (!prs_append_some_prs_data(&state->outgoing_frag,
2177                                       state->req_data, state->req_data_sent,
2178                                       data_sent_thistime)) {
2179                 return NT_STATUS_NO_MEMORY;
2180         }
2181 
2182         /* Copy the sign/seal padding data. */
2183         if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
2184                 return NT_STATUS_NO_MEMORY;
2185         }
2186 
2187         /* Generate any auth sign/seal and add the auth footer. */
2188         switch (state->cli->auth->auth_type) {
2189         case PIPE_AUTH_TYPE_NONE:
2190                 status = NT_STATUS_OK;
2191                 break;
2192         case PIPE_AUTH_TYPE_NTLMSSP:
2193         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2194                 status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
2195                                                  &state->outgoing_frag);
2196                 break;
2197         case PIPE_AUTH_TYPE_SCHANNEL:
2198                 status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
2199                                                   &state->outgoing_frag);
2200                 break;
2201         default:
2202                 status = NT_STATUS_INVALID_PARAMETER;
2203                 break;
2204         }
2205 
2206         state->req_data_sent += data_sent_thistime;
2207         *is_last_frag = ((flags & RPC_FLG_LAST) != 0);
2208 
2209         return status;
2210 }
2211 
2212 static void rpc_api_pipe_req_write_done(struct async_req *subreq)
2213 {
2214         struct async_req *req = talloc_get_type_abort(
2215                 subreq->async.priv, struct async_req);
2216         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2217                 req->private_data, struct rpc_api_pipe_req_state);
2218         NTSTATUS status;
2219         bool is_last_frag;
2220 
2221         status = rpc_write_recv(subreq);
2222         TALLOC_FREE(subreq);
2223         if (!NT_STATUS_IS_OK(status)) {
2224                 async_req_nterror(req, status);
2225                 return;
2226         }
2227 
2228         status = prepare_next_frag(state, &is_last_frag);
2229         if (!NT_STATUS_IS_OK(status)) {
2230                 async_req_nterror(req, status);
2231                 return;
2232         }
2233 
2234         if (is_last_frag) {
2235                 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2236                                            &state->outgoing_frag,
2237                                            RPC_RESPONSE);
2238                 if (async_req_nomem(subreq, req)) {
2239                         return;
2240                 }
2241                 subreq->async.fn = rpc_api_pipe_req_done;
2242                 subreq->async.priv = req;
2243         } else {
2244                 subreq = rpc_write_send(
2245                         state, state->ev,
2246                         state->cli->transport,
2247                         (uint8_t *)prs_data_p(&state->outgoing_frag),
2248                         prs_offset(&state->outgoing_frag));
2249                 if (async_req_nomem(subreq, req)) {
2250                         return;
2251                 }
2252                 subreq->async.fn = rpc_api_pipe_req_write_done;
2253                 subreq->async.priv = req;
2254         }
2255 }
2256 
2257 static void rpc_api_pipe_req_done(struct async_req *subreq)
2258 {
2259         struct async_req *req = talloc_get_type_abort(
2260                 subreq->async.priv, struct async_req);
2261         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2262                 req->private_data, struct rpc_api_pipe_req_state);
2263         NTSTATUS status;
2264 
2265         status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2266         TALLOC_FREE(subreq);
2267         if (!NT_STATUS_IS_OK(status)) {
2268                 async_req_nterror(req, status);
2269                 return;
2270         }
2271         async_req_done(req);
2272 }
2273 
2274 NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
2275                                prs_struct *reply_pdu)
2276 {
2277         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2278                 req->private_data, struct rpc_api_pipe_req_state);
2279         NTSTATUS status;
2280 
2281         if (async_req_is_nterror(req, &status)) {
2282                 /*
2283                  * We always have to initialize the reply pdu, even if there is
2284                  * none. The rpccli_* caller routines expect this.
2285                  */
2286                 prs_init_empty(reply_pdu, mem_ctx, UNMARSHALL);
2287                 return status;
2288         }
2289 
2290         *reply_pdu = state->reply_pdu;
2291         reply_pdu->mem_ctx = mem_ctx;
2292 
2293         /*
2294          * Prevent state->reply_pdu from being freed in
2295          * rpc_api_pipe_req_state_destructor()
2296          */
2297         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2298 
2299         return NT_STATUS_OK;
2300 }
2301 
2302 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2303                         uint8 op_num,
2304                         prs_struct *in_data,
2305                         prs_struct *out_data)
2306 {
2307         TALLOC_CTX *frame = talloc_stackframe();
2308         struct event_context *ev;
2309         struct async_req *req;
2310         NTSTATUS status = NT_STATUS_NO_MEMORY;
2311 
2312         ev = event_context_init(frame);
2313         if (ev == NULL) {
2314                 goto fail;
2315         }
2316 
2317         req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
2318         if (req == NULL) {
2319                 goto fail;
2320         }
2321 
2322         while (req->state < ASYNC_REQ_DONE) {
2323                 event_loop_once(ev);
2324         }
2325 
2326         status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2327  
2328         /*
2329          * NT_STATUS_IO_TIMEOUT indicates network problem,
2330          * tear the connection apart.
2331          */
2332         if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
2333                 if (cli->transport->transport == NCACN_IP_TCP ||
2334                     cli->transport->transport == NCALRPC) {
2335                         rpccli_close_sock_fd(cli);
2336                 }
2337  
2338                 if (cli->transport->transport == NCACN_NP) {
2339                         rpccli_close_np_fd(cli);
2340                 }
2341         }
2342  fail:
2343         TALLOC_FREE(frame);
2344         return status;
2345 }
2346 
2347 #if 0
2348 /****************************************************************************
2349  Set the handle state.
2350 ****************************************************************************/
2351 
2352 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2353                                    const char *pipe_name, uint16 device_state)
2354 {
2355         bool state_set = False;
2356         char param[2];
2357         uint16 setup[2]; /* only need 2 uint16 setup parameters */
2358         char *rparam = NULL;
2359         char *rdata = NULL;
2360         uint32 rparam_len, rdata_len;
2361 
2362         if (pipe_name == NULL)
2363                 return False;
2364 
2365         DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2366                  cli->fnum, pipe_name, device_state));
2367 
2368         /* create parameters: device state */
2369         SSVAL(param, 0, device_state);
2370 
2371         /* create setup parameters. */
2372         setup[0] = 0x0001; 
2373         setup[1] = cli->fnum; /* pipe file handle.  got this from an SMBOpenX. */
2374 
2375         /* send the data on \PIPE\ */
2376         if (cli_api_pipe(cli->cli, "\\PIPE\\",
2377                     setup, 2, 0,                /* setup, length, max */
2378                     param, 2, 0,                /* param, length, max */
2379                     NULL, 0, 1024,              /* data, length, max */
2380                     &rparam, &rparam_len,        /* return param, length */
2381                     &rdata, &rdata_len))         /* return data, length */
2382         {
2383                 DEBUG(5, ("Set Handle state: return OK\n"));
2384                 state_set = True;
2385         }
2386 
2387         SAFE_FREE(rparam);
2388         SAFE_FREE(rdata);
2389 
2390         return state_set;
2391 }
2392 #endif
2393 
2394 /****************************************************************************
2395  Check the rpc bind acknowledge response.
2396 ****************************************************************************/
2397 
2398 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2399 {
2400         if ( hdr_ba->addr.len == 0) {
2401                 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)\n"));
2402         }
2403 
2404         /* check the transfer syntax */
2405         if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2406              (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2407                 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2408                 return False;
2409         }
2410 
2411         if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2412                 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2413                           hdr_ba->res.num_results, hdr_ba->res.reason));
2414         }
2415 
2416         DEBUG(5,("check_bind_response: accepted!\n"));
2417         return True;
2418 }
2419 
2420 /*******************************************************************
2421  Creates a DCE/RPC bind authentication response.
2422  This is the packet that is sent back to the server once we
2423  have received a BIND-ACK, to finish the third leg of
2424  the authentication handshake.
2425  ********************************************************************/
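     /*
      * Shape of the RPC_AUTH3 PDU built here, per the init_rpc_hdr() call
      * below:
      *
      *   | RPC hdr (16) | 4 byte pad | RPC_HDR_AUTH (8) | auth blob |
      *
      * so frag_len = 16 + 4 + 8 + pauth_blob->length, while auth_len in
      * the header is the blob length alone.
      */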
2426 
2427 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2428                                 uint32 rpc_call_id,
2429                                 enum pipe_auth_type auth_type,
2430                                 enum pipe_auth_level auth_level,
2431                                 DATA_BLOB *pauth_blob,
2432                                 prs_struct *rpc_out)
2433 {
2434         RPC_HDR hdr;
2435         RPC_HDR_AUTH hdr_auth;
2436         uint32 pad = 0;
2437 
2438         /* Create the request RPC_HDR */
2439         init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2440                      RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2441                      pauth_blob->length );
2442 
2443         /* Marshall it. */
2444         if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2445                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2446                 return NT_STATUS_NO_MEMORY;
2447         }
2448 
2449         /*
2450                 I'm puzzled about this - seems to violate the DCE RPC auth rules,
2451                 about padding - shouldn't this pad to length 8 ? JRA.
2452         */
2453 
2454         /* 4 bytes padding. */
2455         if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2456                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2457                 return NT_STATUS_NO_MEMORY;
2458         }
2459 
2460         /* Create the request RPC_HDR_AUTH */
2461         init_rpc_hdr_auth(&hdr_auth,
2462                         map_pipe_auth_type_to_rpc_auth_type(auth_type),
2463                         auth_level, 0, 1);
2464 
2465         if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2466                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTH.\n"));
2467                 return NT_STATUS_NO_MEMORY;
2468         }
2469 
2470         /*
2471          * Append the auth data to the outgoing buffer.
2472          */
2473 
2474         if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2475                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2476                 return NT_STATUS_NO_MEMORY;
2477         }
2478 
2479         return NT_STATUS_OK;
2480 }
2481 
2482 /*******************************************************************
2483  Creates a DCE/RPC alter context authentication request which
2484  may contain a SPNEGO auth blob.
2485  ********************************************************************/
2486 
2487 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2488                                         const RPC_IFACE *abstract,
2489                                         const RPC_IFACE *transfer,
2490                                         enum pipe_auth_level auth_level,
2491                                         const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2492                                         prs_struct *rpc_out)
2493 {
2494         RPC_HDR_AUTH hdr_auth;
2495         prs_struct auth_info;
2496         NTSTATUS ret = NT_STATUS_OK;
2497 
2498         ZERO_STRUCT(hdr_auth);
2499         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2500                 return NT_STATUS_NO_MEMORY;
2501 
2502         /* We may change the pad length before marshalling. */
2503         init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2504 
2505         if (pauth_blob->length) {
2506                 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2507                         prs_mem_free(&auth_info);
2508                         return NT_STATUS_NO_MEMORY;
2509                 }
2510         }
2511 
2512         ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2513                                                 rpc_out, 
2514                                                 rpc_call_id,
2515                                                 abstract,
2516                                                 transfer,
2517                                                 &hdr_auth,
2518                                                 &auth_info);
2519         prs_mem_free(&auth_info);
2520         return ret;
2521 }
2522 
2523 /****************************************************************************
2524  Do an rpc bind.
2525 ****************************************************************************/
2526 
2527 struct rpc_pipe_bind_state {
2528         struct event_context *ev;
2529         struct rpc_pipe_client *cli;
2530         prs_struct rpc_out;
2531         uint32_t rpc_call_id;
2532 };
2533 
2534 static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
2535 {
2536         prs_mem_free(&state->rpc_out);
2537         return 0;
2538 }
2539 
2540 static void rpc_pipe_bind_step_one_done(struct async_req *subreq);
2541 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2542                                            struct rpc_pipe_bind_state *state,
2543                                            struct rpc_hdr_info *phdr,
2544                                            prs_struct *reply_pdu);
2545 static void rpc_bind_auth3_write_done(struct async_req *subreq);
2546 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2547                                                     struct rpc_pipe_bind_state *state,
2548                                                     struct rpc_hdr_info *phdr,
2549                                                     prs_struct *reply_pdu);
2550 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq);
2551 
2552 struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
2553                                      struct event_context *ev,
2554                                      struct rpc_pipe_client *cli,
2555                                      struct cli_pipe_auth_data *auth)
2556 {
2557         struct async_req *result, *subreq;
2558         struct rpc_pipe_bind_state *state;
2559         NTSTATUS status;
2560 
2561         if (!async_req_setup(mem_ctx, &result, &state,
2562                              struct rpc_pipe_bind_state)) {
2563                 return NULL;
2564         }
2565 
2566         DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
2567                 rpccli_pipe_txt(debug_ctx(), cli),
2568                 (unsigned int)auth->auth_type,
2569                 (unsigned int)auth->auth_level ));
2570 
2571         state->ev = ev;
2572         state->cli = cli;
2573         state->rpc_call_id = get_rpc_call_id();
2574 
2575         prs_init_empty(&state->rpc_out, state, MARSHALL);
2576         talloc_set_destructor(state, rpc_pipe_bind_state_destructor);
2577 
2578         cli->auth = talloc_move(cli, &auth);
2579 
2580         /* Marshall the outgoing data. */
2581         status = create_rpc_bind_req(cli, &state->rpc_out,
2582                                      state->rpc_call_id,
2583                                      &cli->abstract_syntax,
2584                                      &cli->transfer_syntax,
2585                                      cli->auth->auth_type,
2586                                      cli->auth->auth_level);
2587 
2588         if (!NT_STATUS_IS_OK(status)) {
2589                 goto post_status;
2590         }
2591 
2592         subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
2593                                    RPC_BINDACK);
2594         if (subreq == NULL) {
2595                 status = NT_STATUS_NO_MEMORY;
2596                 goto post_status;
2597         }
2598         subreq->async.fn = rpc_pipe_bind_step_one_done;
2599         subreq->async.priv = result;
2600         return result;
2601 
2602  post_status:
2603         if (async_post_ntstatus(result, ev, status)) {
2604                 return result;
2605         }
2606         TALLOC_FREE(result);
2607         return NULL;
2608 }
2609 
2610 static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
2611 {
2612         struct async_req *req = talloc_get_type_abort(
2613                 subreq->async.priv, struct async_req);
2614         struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2615                 req->private_data, struct rpc_pipe_bind_state);
2616         prs_struct reply_pdu;
2617         struct rpc_hdr_info hdr;
2618         struct rpc_hdr_ba_info hdr_ba;
2619         NTSTATUS status;
2620 
2621         status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2622         TALLOC_FREE(subreq);
2623         if (!NT_STATUS_IS_OK(status)) {
2624                 DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
2625                           rpccli_pipe_txt(debug_ctx(), state->cli),
2626                           nt_errstr(status)));
2627                 async_req_nterror(req, status);
2628                 return;
2629         }
2630 
2631         /* Unmarshall the RPC header */
2632         if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
2633                 DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
2634                 prs_mem_free(&reply_pdu);
2635                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2636                 return;
2637         }
2638 
2639         if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
2640                 DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
2641                           "RPC_HDR_BA.\n"));
2642                 prs_mem_free(&reply_pdu);
2643                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2644                 return;
2645         }
2646 
2647         if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
2648                 DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
2649                 prs_mem_free(&reply_pdu);
2650                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2651                 return;
2652         }
2653 
2654         state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
2655         state->cli->max_recv_frag = hdr_ba.bba.max_rsize;
2656 
2657         /*
2658          * For authenticated binds we may need to do 3 or 4 leg binds:
              * NTLMSSP needs a third leg (an AUTH3 PDU with no reply), and
              * SPNEGO-wrapped NTLMSSP needs a fourth (alter context request
              * plus response).
2659          */
2660 
2661         switch(state->cli->auth->auth_type) {
2662 
2663         case PIPE_AUTH_TYPE_NONE:
2664         case PIPE_AUTH_TYPE_SCHANNEL:
2665                 /* Bind complete. */
2666                 prs_mem_free(&reply_pdu);
2667                 async_req_done(req);
2668                 break;
2669 
2670         case PIPE_AUTH_TYPE_NTLMSSP:
2671                 /* Need to send AUTH3 packet - no reply. */
2672                 status = rpc_finish_auth3_bind_send(req, state, &hdr,
2673                                                     &reply_pdu);
2674                 prs_mem_free(&reply_pdu);
2675                 if (!NT_STATUS_IS_OK(status)) {
2676                         async_req_nterror(req, status);
2677                 }
2678                 break;
2679 
2680         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2681                 /* Need to send alter context request and reply. */
2682                 status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
2683                                                              &reply_pdu);
2684                 prs_mem_free(&reply_pdu);
2685                 if (!NT_STATUS_IS_OK(status)) {
2686                         async_req_nterror(req, status);
2687                 }
2688                 break;
2689 
2690         case PIPE_AUTH_TYPE_KRB5:
2691                 /* Not handled here - fall through to the error case below. */
2692 
2693         default:
2694                 DEBUG(0,("rpc_pipe_bind_step_one_done: unknown auth type %u\n",
2695                          (unsigned int)state->cli->auth->auth_type));
2696                 prs_mem_free(&reply_pdu);
2697                 async_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
2698         }
2699 }
2700 
2701 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2702                                            struct rpc_pipe_bind_state *state,
2703                                            struct rpc_hdr_info *phdr,
2704                                            prs_struct *reply_pdu)
2705 {
2706         DATA_BLOB server_response = data_blob_null;
2707         DATA_BLOB client_reply = data_blob_null;
2708         struct rpc_hdr_auth_info hdr_auth;
2709         struct async_req *subreq;
2710         NTSTATUS status;
2711 
2712         if ((phdr->auth_len == 0)
2713             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2714                 return NT_STATUS_INVALID_PARAMETER;
2715         }
2716 
2717         if (!prs_set_offset(
2718                     reply_pdu,
2719                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2720                 return NT_STATUS_INVALID_PARAMETER;
2721         }
2722 
2723         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2724                 return NT_STATUS_INVALID_PARAMETER;
2725         }
2726 
2727         /* TODO - check auth_type/auth_level match. */
2728 
2729         server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2730         prs_copy_data_out((char *)server_response.data, reply_pdu,
2731                           phdr->auth_len);
2732 
2733         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2734                                 server_response, &client_reply);
2735 
2736         if (!NT_STATUS_IS_OK(status)) {
2737                 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2738                           "blob failed: %s.\n", nt_errstr(status)));
2739                 return status;
2740         }
2741 
             /* Parent the marshall buffer on state (as the other call sites
              * do) so it survives until the async write completes. */
2742         prs_init_empty(&state->rpc_out, state, MARSHALL);
2743 
2744         status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2745                                        state->cli->auth->auth_type,
2746                                        state->cli->auth->auth_level,
2747                                        &client_reply, &state->rpc_out);
2748         data_blob_free(&client_reply);
2749 
2750         if (!NT_STATUS_IS_OK(status)) {
2751                 return status;
2752         }
2753 
2754         subreq = rpc_write_send(state, state->ev, state->cli->transport,
2755                                 (uint8_t *)prs_data_p(&state->rpc_out),
2756                                 prs_offset(&state->rpc_out));
2757         if (subreq == NULL) {
2758                 return NT_STATUS_NO_MEMORY;
2759         }
2760         subreq->async.fn = rpc_bind_auth3_write_done;
2761         subreq->async.priv = req;
2762         return NT_STATUS_OK;
2763 }
2764 
2765 static void rpc_bind_auth3_write_done(struct async_req *subreq)
2766 {
2767         struct async_req *req = talloc_get_type_abort(
2768                 subreq->async.priv, struct async_req);
2769         NTSTATUS status;
2770 
2771         status = rpc_write_recv(subreq);
2772         TALLOC_FREE(subreq);
2773         if (!NT_STATUS_IS_OK(status)) {
2774                 async_req_nterror(req, status);
2775                 return;
2776         }
2777         async_req_done(req);
2778 }
2779 
2780 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2781                                                     struct rpc_pipe_bind_state *state,
2782                                                     struct rpc_hdr_info *phdr,
2783                                                     prs_struct *reply_pdu)
2784 {
2785         DATA_BLOB server_spnego_response = data_blob_null;
2786         DATA_BLOB server_ntlm_response = data_blob_null;
2787         DATA_BLOB client_reply = data_blob_null;
2788         DATA_BLOB tmp_blob = data_blob_null;
2789         RPC_HDR_AUTH hdr_auth;
2790         struct async_req *subreq;
2791         NTSTATUS status;
2792 
2793         if ((phdr->auth_len == 0)
2794             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2795                 return NT_STATUS_INVALID_PARAMETER;
2796         }
2797 
2798         /* Process the returned NTLMSSP blob first. */
2799         if (!prs_set_offset(
2800                     reply_pdu,
2801                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2802                 return NT_STATUS_INVALID_PARAMETER;
2803         }
2804 
2805         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2806                 return NT_STATUS_INVALID_PARAMETER;
2807         }
2808 
2809         server_spnego_response = data_blob(NULL, phdr->auth_len);
2810         prs_copy_data_out((char *)server_spnego_response.data,
2811                           reply_pdu, phdr->auth_len);
2812 
2813         /*
2814          * The server might give us back two challenges - tmp_blob is for the
2815          * second.
2816          */
2817         if (!spnego_parse_challenge(server_spnego_response,
2818                                     &server_ntlm_response, &tmp_blob)) {
2819                 data_blob_free(&server_spnego_response);
2820                 data_blob_free(&server_ntlm_response);
2821                 data_blob_free(&tmp_blob);
2822                 return NT_STATUS_INVALID_PARAMETER;
2823         }
2824 
2825         /* We're finished with the server spnego response and the tmp_blob. */
2826         data_blob_free(&server_spnego_response);
2827         data_blob_free(&tmp_blob);
2828 
2829         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2830                                 server_ntlm_response, &client_reply);
2831 
2832         /* Finished with the server_ntlm response */
2833         data_blob_free(&server_ntlm_response);
2834 
2835         if (!NT_STATUS_IS_OK(status)) {
2836                 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
2837                           "using server blob failed.\n"));
2838                 data_blob_free(&client_reply);
2839                 return status;
2840         }
2841 
2842         /* SPNEGO wrap the client reply. */
2843         tmp_blob = spnego_gen_auth(client_reply);
2844         data_blob_free(&client_reply);
2845         client_reply = tmp_blob;
2846         tmp_blob = data_blob_null;
2847 
2848         /* Now prepare the alter context pdu. */
2849         prs_init_empty(&state->rpc_out, state, MARSHALL);
2850 
2851         status = create_rpc_alter_context(state->rpc_call_id,
2852                                           &state->cli->abstract_syntax,
2853                                           &state->cli->transfer_syntax,
2854                                           state->cli->auth->auth_level,
2855                                           &client_reply,
2856                                           &state->rpc_out);
2857         data_blob_free(&client_reply);
2858 
2859         if (!NT_STATUS_IS_OK(status)) {
2860                 return status;
2861         }
2862 
2863         subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2864                                    &state->rpc_out, RPC_ALTCONTRESP);
2865         if (subreq == NULL) {
2866                 return NT_STATUS_NO_MEMORY;
2867         }
2868         subreq->async.fn = rpc_bind_ntlmssp_api_done;
2869         subreq->async.priv = req;
2870         return NT_STATUS_OK;
2871 }
2872 
2873 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
2874 {
2875         struct async_req *req = talloc_get_type_abort(
2876                 subreq->async.priv, struct async_req);
2877         struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2878                 req->private_data, struct rpc_pipe_bind_state);
2879         DATA_BLOB server_spnego_response = data_blob_null;
2880         DATA_BLOB tmp_blob = data_blob_null;
2881         prs_struct reply_pdu;
2882         struct rpc_hdr_info hdr;
2883         struct rpc_hdr_auth_info hdr_auth;
2884         NTSTATUS status;
2885 
2886         status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2887         TALLOC_FREE(subreq);
2888         if (!NT_STATUS_IS_OK(status)) {
2889                 async_req_nterror(req, status);
2890                 return;
2891         }
2892 
2893         /* Get the auth blob from the reply. */
2894         if (!smb_io_rpc_hdr("rpc_hdr   ", &hdr, &reply_pdu, 0)) {
2895                 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
2896                           "unmarshall RPC_HDR.\n"));
2897                 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2898                 return;
2899         }
2900 
2901         if (!prs_set_offset(
2902                     &reply_pdu,
2903                     hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
2904                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2905                 return;
2906         }
2907 
2908         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
2909                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2910                 return;
2911         }
2912 
2913         server_spnego_response = data_blob(NULL, hdr.auth_len);
2914         prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
2915                           hdr.auth_len);
2916 
2917         /* Check we got a valid auth response. */
2918         if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
2919                                         OID_NTLMSSP, &tmp_blob)) {
2920                 data_blob_free(&server_spnego_response);
2921                 data_blob_free(&tmp_blob);
2922                 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2923                 return;
2924         }
2925 
2926         data_blob_free(&server_spnego_response);
2927         data_blob_free(&tmp_blob);
2928 
2929         DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
2930                  "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
2931         async_req_done(req);
2932 }
2933 
2934 NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
2935 {
2936         return async_req_simple_recv_ntstatus(req);
2937 }
2938 
2939 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
2940                        struct cli_pipe_auth_data *auth)
2941 {
2942         TALLOC_CTX *frame = talloc_stackframe();
2943         struct event_context *ev;
2944         struct async_req *req;
2945         NTSTATUS status = NT_STATUS_NO_MEMORY;
2946 
2947         ev = event_context_init(frame);
2948         if (ev == NULL) {
2949                 goto fail;
2950         }
2951 
2952         req = rpc_pipe_bind_send(frame, ev, cli, auth);
2953         if (req == NULL) {
2954                 goto fail;
2955         }
2956 
2957         while (req->state < ASYNC_REQ_DONE) {
2958                 event_loop_once(ev);
2959         }
2960 
2961         status = rpc_pipe_bind_recv(req);
2962  fail:
2963         TALLOC_FREE(frame);
2964         return status;
2965 }
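
     /*
      * Illustrative sketch only (not used by this file): a minimal anonymous
      * bind of an already opened pipe, using the synchronous wrapper above.
      * The caller-supplied pipe "p" is an assumption for the example.
      */
     #if 0
     static NTSTATUS example_anon_bind(struct rpc_pipe_client *p)
     {
             struct cli_pipe_auth_data *auth;
             NTSTATUS status;

             status = rpccli_anon_bind_data(p, &auth);
             if (!NT_STATUS_IS_OK(status)) {
                     return status;
             }
             return rpc_pipe_bind(p, auth);
     }
     #endif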
2966 
2967 unsigned int rpccli_set_timeout(struct rpc_pipe_client *rpc_cli,
2968                                 unsigned int timeout)
2969 {
2970         struct cli_state *cli;
2971 
2972         if (rpc_cli->transport->transport == NCACN_NP) {
2973                 cli = rpc_pipe_np_smb_conn(rpc_cli);
2974                 if (cli == NULL) {
2975                         return 0;
2976                 }
2977                 return cli_set_timeout(cli, timeout);
2978         }
2979 
2980         if (rpc_cli->transport->transport == NCACN_IP_TCP ||
2981             rpc_cli->transport->transport == NCALRPC) {
2982                 return rpccli_set_sock_timeout(rpc_cli, timeout);
2983         }
2984 
2985         if (rpc_cli->transport->transport == NCACN_INTERNAL) {
2986                 cli = rpc_pipe_smbd_smb_conn(rpc_cli);
2987                 if (!cli) {
2988                         return 0;
2989                 }
2990                 return cli_set_timeout(cli, timeout);
2991         }
2992 
2993         return 0;
2994 }
2995 
2996 bool rpccli_get_pwd_hash(struct rpc_pipe_client *rpc_cli, uint8_t nt_hash[16])
2997 {
2998         struct cli_state *cli;
2999 
3000         if ((rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
3001             || (rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
3002                 memcpy(nt_hash, rpc_cli->auth->a_u.ntlmssp_state->nt_hash, 16);
3003                 return true;
3004         }
3005 
3006         cli = rpc_pipe_np_smb_conn(rpc_cli);
3007         if (cli == NULL) {
3008                 return false;
3009         }
3010         E_md4hash(cli->password ? cli->password : "", nt_hash);
3011         return true;
3012 }
3013 
3014 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
3015                                struct cli_pipe_auth_data **presult)
3016 {
3017         struct cli_pipe_auth_data *result;
3018 
3019         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3020         if (result == NULL) {
3021                 return NT_STATUS_NO_MEMORY;
3022         }
3023 
3024         result->auth_type = PIPE_AUTH_TYPE_NONE;
3025         result->auth_level = PIPE_AUTH_LEVEL_NONE;
3026 
3027         result->user_name = talloc_strdup(result, "");
3028         result->domain = talloc_strdup(result, "");
3029         if ((result->user_name == NULL) || (result->domain == NULL)) {
3030                 TALLOC_FREE(result);
3031                 return NT_STATUS_NO_MEMORY;
3032         }
3033 
3034         *presult = result;
3035         return NT_STATUS_OK;
3036 }
3037 
3038 static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
3039 {
3040         ntlmssp_end(&auth->a_u.ntlmssp_state);
3041         return 0;
3042 }
3043 
3044 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
3045                                   enum pipe_auth_type auth_type,
3046                                   enum pipe_auth_level auth_level,
3047                                   const char *domain,
3048                                   const char *username,
3049                                   const char *password,
3050                                   struct cli_pipe_auth_data **presult)
3051 {
3052         struct cli_pipe_auth_data *result;
3053         NTSTATUS status;
3054 
3055         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3056         if (result == NULL) {
3057                 return NT_STATUS_NO_MEMORY;
3058         }
3059 
3060         result->auth_type = auth_type;
3061         result->auth_level = auth_level;
3062 
3063         result->user_name = talloc_strdup(result, username);
3064         result->domain = talloc_strdup(result, domain);
3065         if ((result->user_name == NULL) || (result->domain == NULL)) {
3066                 status = NT_STATUS_NO_MEMORY;
3067                 goto fail;
3068         }
3069 
3070         status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3071         if (!NT_STATUS_IS_OK(status)) {
3072                 goto fail;
3073         }
3074 
3075         talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3076 
3077         status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3078         if (!NT_STATUS_IS_OK(status)) {
3079                 goto fail;
3080         }
3081 
3082         status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3083         if (!NT_STATUS_IS_OK(status)) {
3084                 goto fail;
3085         }
3086 
3087         status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3088         if (!NT_STATUS_IS_OK(status)) {
3089                 goto fail;
3090         }
3091 
3092         /*
3093          * Turn off sign+seal to allow selected auth level to turn it back on.
3094          */
3095         result->a_u.ntlmssp_state->neg_flags &=
3096                 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3097 
3098         if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3099                 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3100         } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3101                 result->a_u.ntlmssp_state->neg_flags
3102                         |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3103         }
3104 
3105         *presult = result;
3106         return NT_STATUS_OK;
3107 
3108  fail:
3109         TALLOC_FREE(result);
3110         return status;
3111 }
3112 
3113 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3114                                    enum pipe_auth_level auth_level,
3115                                    const uint8_t sess_key[16],
3116                                    struct cli_pipe_auth_data **presult)
3117 {
3118         struct cli_pipe_auth_data *result;
3119 
3120         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3121         if (result == NULL) {
3122                 return NT_STATUS_NO_MEMORY;
3123         }
3124 
3125         result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3126         result->auth_level = auth_level;
3127 
3128         result->user_name = talloc_strdup(result, "");
3129         result->domain = talloc_strdup(result, domain);
3130         if ((result->user_name == NULL) || (result->domain == NULL)) {
3131                 goto fail;
3132         }
3133 
3134         result->a_u.schannel_auth = talloc(result,
3135                                            struct schannel_auth_struct);
3136         if (result->a_u.schannel_auth == NULL) {
3137                 goto fail;
3138         }
3139 
3140         memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3141                sizeof(result->a_u.schannel_auth->sess_key));
3142         result->a_u.schannel_auth->seq_num = 0;
3143 
3144         *presult = result;
3145         return NT_STATUS_OK;
3146 
3147  fail:
3148         TALLOC_FREE(result);
3149         return NT_STATUS_NO_MEMORY;
3150 }
3151 
3152 #ifdef HAVE_KRB5
3153 static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
3154 {
3155         data_blob_free(&auth->session_key);
3156         return 0;
3157 }
3158 #endif
3159 
3160 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3161                                    enum pipe_auth_level auth_level,
3162                                    const char *service_princ,
3163                                    const char *username,
3164                                    const char *password,
3165                                    struct cli_pipe_auth_data **presult)
3166 {
3167 #ifdef HAVE_KRB5
3168         struct cli_pipe_auth_data *result;
3169 
3170         if ((username != NULL) && (password != NULL)) {
3171                 int ret = kerberos_kinit_password(username, password, 0, NULL);
3172                 if (ret != 0) {
3173                         return NT_STATUS_ACCESS_DENIED;
3174                 }
3175         }
3176 
3177         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3178         if (result == NULL) {
3179                 return NT_STATUS_NO_MEMORY;
3180         }
3181 
3182         result->auth_type = PIPE_AUTH_TYPE_KRB5;
3183         result->auth_level = auth_level;
3184 
3185         /*
3186          * Username / domain need fixing!
3187          */
3188         result->user_name = talloc_strdup(result, "");
3189         result->domain = talloc_strdup(result, "");
3190         if ((result->user_name == NULL) || (result->domain == NULL)) {
3191                 goto fail;
3192         }
3193 
3194         result->a_u.kerberos_auth = TALLOC_ZERO_P(
3195                 result, struct kerberos_auth_struct);
3196         if (result->a_u.kerberos_auth == NULL) {
3197                 goto fail;
3198         }
3199         talloc_set_destructor(result->a_u.kerberos_auth,
3200                               cli_auth_kerberos_data_destructor);
3201 
3202         result->a_u.kerberos_auth->service_principal = talloc_strdup(
3203                 result, service_princ);
3204         if (result->a_u.kerberos_auth->service_principal == NULL) {
3205                 goto fail;
3206         }
3207 
3208         *presult = result;
3209         return NT_STATUS_OK;
3210 
3211  fail:
3212         TALLOC_FREE(result);
3213         return NT_STATUS_NO_MEMORY;
3214 #else
3215         return NT_STATUS_NOT_SUPPORTED;
3216 #endif
3217 }
3218 
3219 /**
3220  * Create an rpc pipe client struct, connecting to a tcp port.
3221  */
3222 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
3223                                        uint16_t port,
3224                                        const struct ndr_syntax_id *abstract_syntax,
3225                                        struct rpc_pipe_client **presult)
3226 {
3227         struct rpc_pipe_client *result;
3228         struct sockaddr_storage addr;
3229         NTSTATUS status;
3230         int fd;
3231 
3232         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3233         if (result == NULL) {
3234                 return NT_STATUS_NO_MEMORY;
3235         }
3236 
3237         result->abstract_syntax = *abstract_syntax;
3238         result->transfer_syntax = ndr_transfer_syntax;
3239         result->dispatch = cli_do_rpc_ndr;
3240 
3241         result->desthost = talloc_strdup(result, host);
3242         result->srv_name_slash = talloc_asprintf_strupper_m(
3243                 result, "\\\\%s", result->desthost);
3244         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3245                 status = NT_STATUS_NO_MEMORY;
3246                 goto fail;
3247         }
3248 
3249         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3250         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3251 
3252         if (!resolve_name(host, &addr, 0)) {
3253                 status = NT_STATUS_NOT_FOUND;
3254                 goto fail;
3255         }
3256 
3257         status = open_socket_out(&addr, port, 60, &fd);
3258         if (!NT_STATUS_IS_OK(status)) {
3259                 goto fail;
3260         }
3261         set_socket_options(fd, lp_socket_options());
3262 
3263         status = rpc_transport_sock_init(result, fd, &result->transport);
3264         if (!NT_STATUS_IS_OK(status)) {
3265                 close(fd);
3266                 goto fail;
3267         }
3268 
3269         result->transport->transport = NCACN_IP_TCP;
3270 
3271         *presult = result;
3272         return NT_STATUS_OK;
3273 
3274  fail:
3275         TALLOC_FREE(result);
3276         return status;
3277 }
3278 
3279 /**
3280  * Determine the tcp port on which a dcerpc interface is listening
3281  * for the ncacn_ip_tcp transport via the endpoint mapper of the
3282  * target host.
3283  */
3284 static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
3285                                       const struct ndr_syntax_id *abstract_syntax,
3286                                       uint16_t *pport)
3287 {
3288         NTSTATUS status;
3289         struct rpc_pipe_client *epm_pipe = NULL;
3290         struct cli_pipe_auth_data *auth = NULL;
3291         struct dcerpc_binding *map_binding = NULL;
3292         struct dcerpc_binding *res_binding = NULL;
3293         struct epm_twr_t *map_tower = NULL;
3294         struct epm_twr_t *res_towers = NULL;
3295         struct policy_handle *entry_handle = NULL;
3296         uint32_t num_towers = 0;
3297         uint32_t max_towers = 1;
3298         struct epm_twr_p_t towers;
3299         TALLOC_CTX *tmp_ctx = talloc_stackframe();
3300 
3301         if (pport == NULL) {
3302                 status = NT_STATUS_INVALID_PARAMETER;
3303                 goto done;
3304         }
3305 
3306         /* open the connection to the endpoint mapper */
3307         status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
3308                                         &ndr_table_epmapper.syntax_id,
3309                                         &epm_pipe);
3310 
3311         if (!NT_STATUS_IS_OK(status)) {
3312                 goto done;
3313         }
3314 
3315         status = rpccli_anon_bind_data(tmp_ctx, &auth);
3316         if (!NT_STATUS_IS_OK(status)) {
3317                 goto done;
3318         }
3319 
3320         status = rpc_pipe_bind(epm_pipe, auth);
3321         if (!NT_STATUS_IS_OK(status)) {
3322                 goto done;
3323         }
3324 
3325         /* create tower for asking the epmapper */
3326 
3327         map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
3328         if (map_binding == NULL) {
3329                 status = NT_STATUS_NO_MEMORY;
3330                 goto done;
3331         }
3332 
3333         map_binding->transport = NCACN_IP_TCP;
3334         map_binding->object = *abstract_syntax;
3335         map_binding->host = host; /* needed? */
3336         map_binding->endpoint = "0"; /* correct? needed? */
3337 
3338         map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
3339         if (map_tower == NULL) {
3340                 status = NT_STATUS_NO_MEMORY;
3341                 goto done;
3342         }
3343 
3344         status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
3345                                             &(map_tower->tower));
3346         if (!NT_STATUS_IS_OK(status)) {
3347                 goto done;
3348         }
3349 
3350         /* allocate further parameters for the epm_Map call */
3351 
3352         res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
3353         if (res_towers == NULL) {
3354                 status = NT_STATUS_NO_MEMORY;
3355                 goto done;
3356         }
3357         towers.twr = res_towers;
3358 
3359         entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
3360         if (entry_handle == NULL) {
3361                 status = NT_STATUS_NO_MEMORY;
3362                 goto done;
3363         }
3364 
3365         /* ask the endpoint mapper for the port */
3366 
3367         status = rpccli_epm_Map(epm_pipe,
3368                                 tmp_ctx,
3369                                 CONST_DISCARD(struct GUID *,
3370                                               &(abstract_syntax->uuid)),
3371                                 map_tower,
3372                                 entry_handle,
3373                                 max_towers,
3374                                 &num_towers,
3375                                 &towers);
3376 
3377         if (!NT_STATUS_IS_OK(status)) {
3378                 goto done;
3379         }
3380 
3381         if (num_towers != 1) {
3382                 status = NT_STATUS_UNSUCCESSFUL;
3383                 goto done;
3384         }
3385 
3386         /* extract the port from the answer */
3387 
3388         status = dcerpc_binding_from_tower(tmp_ctx,
3389                                            &(towers.twr->tower),
3390                                            &res_binding);
3391         if (!NT_STATUS_IS_OK(status)) {
3392                 goto done;
3393         }
3394 
3395         /* are further checks here necessary? */
3396         if (res_binding->transport != NCACN_IP_TCP) {
3397                 status = NT_STATUS_UNSUCCESSFUL;
3398                 goto done;
3399         }
3400 
3401         *pport = (uint16_t)atoi(res_binding->endpoint);
3402 
3403 done:
3404         TALLOC_FREE(tmp_ctx);
3405         return status;
3406 }
3407 
3408 /**
3409  * Create an rpc pipe client struct, connecting to a host via tcp.
3410  * The port is determined by asking the endpoint mapper on the given
3411  * host.
3412  */
3413 NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
3414                            const struct ndr_syntax_id *abstract_syntax,
3415                            struct rpc_pipe_client **presult)
3416 {
3417         NTSTATUS status;
3418         uint16_t port = 0;
3419 
3420         *presult = NULL;
3421 
3422         status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
3423         if (!NT_STATUS_IS_OK(status)) {
3424                 goto done;
3425         }
3426 
3427         status = rpc_pipe_open_tcp_port(mem_ctx, host, port,
3428                                         abstract_syntax, presult);
3429 
3430 done:
3431         return status;
3432 }
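
     /*
      * Illustrative sketch only (not used by this file): ask the endpoint
      * mapper on the target host for the port, connect, and bind the pipe
      * anonymously.  The host name and the lsarpc interface table are
      * assumptions made for the example.
      */
     #if 0
     static NTSTATUS example_open_lsa_tcp(TALLOC_CTX *mem_ctx,
                                          struct rpc_pipe_client **presult)
     {
             struct rpc_pipe_client *p = NULL;
             struct cli_pipe_auth_data *auth = NULL;
             NTSTATUS status;

             status = rpc_pipe_open_tcp(mem_ctx, "server1.example.com",
                                        &ndr_table_lsarpc.syntax_id, &p);
             if (!NT_STATUS_IS_OK(status)) {
                     return status;
             }

             status = rpccli_anon_bind_data(p, &auth);
             if (NT_STATUS_IS_OK(status)) {
                     status = rpc_pipe_bind(p, auth);
             }
             if (!NT_STATUS_IS_OK(status)) {
                     TALLOC_FREE(p);
                     return status;
             }

             *presult = p;
             return NT_STATUS_OK;
     }
     #endif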
3433 
3434 /********************************************************************
3435  Create an rpc pipe client struct, connecting to a unix domain socket
3436  ********************************************************************/
3437 NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
3438                                const struct ndr_syntax_id *abstract_syntax,
3439                                struct rpc_pipe_client **presult)
3440 {
3441         struct rpc_pipe_client *result;
3442         struct sockaddr_un addr;
3443         NTSTATUS status;
3444         int fd;
3445 
3446         result = talloc_zero(mem_ctx, struct rpc_pipe_client);
3447         if (result == NULL) {
3448                 return NT_STATUS_NO_MEMORY;
3449         }
3450 
3451         result->abstract_syntax = *abstract_syntax;
3452         result->transfer_syntax = ndr_transfer_syntax;
3453         result->dispatch = cli_do_rpc_ndr;
3454 
3455         result->desthost = get_myname(result);
3456         result->srv_name_slash = talloc_asprintf_strupper_m(
3457                 result, "\\\\%s", result->desthost);
3458         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3459                 status = NT_STATUS_NO_MEMORY;
3460                 goto fail;
3461         }
3462 
3463         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3464         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3465 
3466         fd = socket(AF_UNIX, SOCK_STREAM, 0);
3467         if (fd == -1) {
3468                 status = map_nt_error_from_unix(errno);
3469                 goto fail;
3470         }
3471 
3472         ZERO_STRUCT(addr);
3473         addr.sun_family = AF_UNIX;

             /* Reject paths that would not fit (and thus not be
              * NUL-terminated) in sun_path. */
             if (strlen(socket_path) >= sizeof(addr.sun_path)) {
                     DEBUG(0, ("socket_path %s too long for sun_path\n",
                               socket_path));
                     close(fd);
                     status = NT_STATUS_NAME_TOO_LONG;
                     goto fail;
             }
3474         strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
3475 
3476         if (sys_connect(fd, (struct sockaddr *)&addr) == -1) {
3477                 DEBUG(0, ("connect(%s) failed: %s\n", socket_path,
3478                           strerror(errno)));
                     /* Save the error before close() can clobber errno, and
                      * go through the common cleanup path. */
                     status = map_nt_error_from_unix(errno);
3479                 close(fd);
3480                 goto fail;
3481         }
3482 
3483         status = rpc_transport_sock_init(result, fd, &result->transport);
3484         if (!NT_STATUS_IS_OK(status)) {
3485                 close(fd);
3486                 goto fail;
3487         }
3488 
3489         result->transport->transport = NCALRPC;
3490 
3491         *presult = result;
3492         return NT_STATUS_OK;
3493 
3494  fail:
3495         TALLOC_FREE(result);
3496         return status;
3497 }
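
     /*
      * Illustrative sketch only: open a pipe over a unix domain socket.  The
      * socket path and the winreg interface table are assumptions; as with
      * the tcp case, the pipe still has to be bound (e.g. anonymously via
      * rpccli_anon_bind_data() and rpc_pipe_bind()) before use.
      */
     #if 0
     static NTSTATUS example_open_ncalrpc(TALLOC_CTX *mem_ctx,
                                          struct rpc_pipe_client **presult)
     {
             return rpc_pipe_open_ncalrpc(mem_ctx, "/tmp/example.sock",
                                          &ndr_table_winreg.syntax_id, presult);
     }
     #endif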
3498 
3499 static int rpc_pipe_client_np_destructor(struct rpc_pipe_client *p)
3500 {
3501         struct cli_state *cli;
3502 
3503         cli = rpc_pipe_np_smb_conn(p);
3504         if (cli != NULL) {
3505                 DLIST_REMOVE(cli->pipe_list, p);
3506         }
3507         return 0;
3508 }
3509 
3510 /****************************************************************************
3511  Open a named pipe over SMB to a remote server.
3512  *
3513  * CAVEAT CALLER OF THIS FUNCTION:
3514  *    The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
3515  *    so be sure that this function is called AFTER any structure (vs pointer)
3516  *    assignment of the cli.  In particular, libsmbclient does structure
3517  *    assignments of cli, which invalidates the data in the returned
3518  *    rpc_pipe_client if this function is called before the structure assignment
3519  *    of cli.
3520  * 
3521  ****************************************************************************/
3522 
3523 static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
3524                                  const struct ndr_syntax_id *abstract_syntax,
3525                                  struct rpc_pipe_client **presult)
3526 {
3527         struct rpc_pipe_client *result;
3528         NTSTATUS status;
3529 
3530         /* sanity check to protect against crashes */
3531 
3532         if ( !cli ) {
3533                 return NT_STATUS_INVALID_HANDLE;
3534         }
3535 
3536         result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
3537         if (result == NULL) {
3538                 return NT_STATUS_NO_MEMORY;
3539         }
3540 
3541         result->abstract_syntax = *abstract_syntax;
3542         result->transfer_syntax = ndr_transfer_syntax;
3543         result->dispatch = cli_do_rpc_ndr;
3544         result->desthost = talloc_strdup(result, cli->desthost);
3545         result->srv_name_slash = talloc_asprintf_strupper_m(
3546                 result, "\\\\%s", result->desthost);
3547 
3548         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3549         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3550 
3551         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3552                 TALLOC_FREE(result);
3553                 return NT_STATUS_NO_MEMORY;
3554         }
3555 
3556         status = rpc_transport_np_init(result, cli, abstract_syntax,
3557                                        &result->transport);
3558         if (!NT_STATUS_IS_OK(status)) {
3559                 TALLOC_FREE(result);
3560                 return status;
3561         }
3562 
3563         result->transport->transport = NCACN_NP;
3564 
3565         DLIST_ADD(cli->pipe_list, result);
3566         talloc_set_destructor(result, rpc_pipe_client_np_destructor);
3567 
3568         *presult = result;
3569         return NT_STATUS_OK;
3570 }
3571 
3572 NTSTATUS rpc_pipe_open_local(TALLOC_CTX *mem_ctx,
3573                              struct rpc_cli_smbd_conn *conn,
3574                              const struct ndr_syntax_id *syntax,
3575                              struct rpc_pipe_client **presult)
3576 {
3577         struct rpc_pipe_client *result;
3578         struct cli_pipe_auth_data *auth;
3579         NTSTATUS status;
3580 
3581         result = talloc(mem_ctx, struct rpc_pipe_client);
3582         if (result == NULL) {
3583                 return NT_STATUS_NO_MEMORY;
3584         }
3585         result->abstract_syntax = *syntax;
3586         result->transfer_syntax = ndr_transfer_syntax;
3587         result->dispatch = cli_do_rpc_ndr;
3588         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3589         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3590 
3591         result->desthost = talloc_strdup(result, global_myname());
3592         result->srv_name_slash = talloc_asprintf_strupper_m(
3593                 result, "\\\\%s", global_myname());
3594         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3595                 TALLOC_FREE(result);
3596                 return NT_STATUS_NO_MEMORY;
3597         }
3598 
3599         status = rpc_transport_smbd_init(result, conn, syntax,
3600                                          &result->transport);
3601         if (!NT_STATUS_IS_OK(status)) {
3602                 DEBUG(1, ("rpc_transport_smbd_init failed: %s\n",
3603                           nt_errstr(status)));
3604                 TALLOC_FREE(result);
3605                 return status;
3606         }
3607 
3608         status = rpccli_anon_bind_data(result, &auth);
3609         if (!NT_STATUS_IS_OK(status)) {
3610                 DEBUG(1, ("rpccli_anon_bind_data failed: %s\n",
3611                           nt_errstr(status)));
3612                 TALLOC_FREE(result);
3613                 return status;
3614         }
3615 
3616         status = rpc_pipe_bind(result, auth);
3617         if (!NT_STATUS_IS_OK(status)) {
3618                 DEBUG(1, ("rpc_pipe_bind failed: %s\n", nt_errstr(status)));
3619                 TALLOC_FREE(result);
3620                 return status;
3621         }
3622 
3623         result->transport->transport = NCACN_INTERNAL;
3624 
3625         *presult = result;
3626         return NT_STATUS_OK;
3627 }
3628 
3629 /****************************************************************************
3630  Open a pipe to a remote server.
3631  ****************************************************************************/
3632 
3633 static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
3634                                   enum dcerpc_transport_t transport,
3635                                   const struct ndr_syntax_id *interface,
3636                                   struct rpc_pipe_client **presult)
3637 {
3638         switch (transport) {
3639         case NCACN_IP_TCP:
3640                 return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
3641                                          presult);
3642         case NCACN_NP:
3643                 return rpc_pipe_open_np(cli, interface, presult);
3644         default:
3645                 return NT_STATUS_NOT_IMPLEMENTED;
3646         }
3647 }
3648 
3649 /****************************************************************************
3650  Open a named pipe to an SMB server and bind anonymously.
3651  ****************************************************************************/
3652 
3653 NTSTATUS cli_rpc_pipe_open_noauth_transport(struct cli_state *cli,
3654                                             enum dcerpc_transport_t transport,
3655                                             const struct ndr_syntax_id *interface,
3656                                             struct rpc_pipe_client **presult)
3657 {
3658         struct rpc_pipe_client *result;
3659         struct cli_pipe_auth_data *auth;
3660         NTSTATUS status;
3661 
3662         status = cli_rpc_pipe_open(cli, transport, interface, &result);
3663         if (!NT_STATUS_IS_OK(status)) {
3664                 return status;
3665         }
3666 
3667         status = rpccli_anon_bind_data(result, &auth);
3668         if (!NT_STATUS_IS_OK(status)) {
3669                 DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
3670                           nt_errstr(status)));
3671                 TALLOC_FREE(result);
3672                 return status;
3673         }
3674 
3675         /*
3676          * This is a bit of an abstraction violation due to the fact that an
3677          * anonymous bind on an authenticated SMB inherits the user/domain
3678          * from the enclosing SMB creds
3679          */
3680 
3681         TALLOC_FREE(auth->user_name);
3682         TALLOC_FREE(auth->domain);
3683 
3684         auth->user_name = talloc_strdup(auth, cli->user_name);
3685         auth->domain = talloc_strdup(auth, cli->domain);
3686         auth->user_session_key = data_blob_talloc(auth,
3687                 cli->user_session_key.data,
3688                 cli->user_session_key.length);
3689 
3690         if ((auth->user_name == NULL) || (auth->domain == NULL)) {
3691                 TALLOC_FREE(result);
3692                 return NT_STATUS_NO_MEMORY;
3693         }
3694 
3695         status = rpc_pipe_bind(result, auth);
3696         if (!NT_STATUS_IS_OK(status)) {
3697                 int lvl = 0;
3698                 if (ndr_syntax_id_equal(interface,
3699                                         &ndr_table_dssetup.syntax_id)) {
3700                         /* non AD domains just don't have this pipe, avoid
3701                          * a level 0 debug message in that case - gd */
3702                         lvl = 3;
3703                 }
3704                 DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
3705                             "%s failed with error %s\n",
3706                             get_pipe_name_from_iface(interface),
3707                             nt_errstr(status) ));
3708                 TALLOC_FREE(result);
3709                 return status;
3710         }
3711 
3712         DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
3713                   "%s and bound anonymously.\n",
3714                   get_pipe_name_from_iface(interface), cli->desthost));
3715 
3716         *presult = result;
3717         return NT_STATUS_OK;
3718 }
3719 
3720 /****************************************************************************
3721  ****************************************************************************/
3722 
3723 NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
3724                                   const struct ndr_syntax_id *interface,
3725                                   struct rpc_pipe_client **presult)
3726 {
3727         return cli_rpc_pipe_open_noauth_transport(cli, NCACN_NP,
3728                                                   interface, presult);
3729 }
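
     /*
      * Illustrative sketch only: open the srvsvc pipe over an existing SMB
      * connection and bind it anonymously.  The srvsvc interface table is an
      * assumption made for the example.
      */
     #if 0
     static NTSTATUS example_open_srvsvc_noauth(struct cli_state *cli,
                                                struct rpc_pipe_client **presult)
     {
             return cli_rpc_pipe_open_noauth(cli, &ndr_table_srvsvc.syntax_id,
                                             presult);
     }
     #endif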
3730 
3731 /****************************************************************************
3732  Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
3733  ****************************************************************************/
3734 
3735 static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
3736                                                    const struct ndr_syntax_id *interface,
3737                                                    enum dcerpc_transport_t transport,
3738                                                    enum pipe_auth_type auth_type,
3739                                                    enum pipe_auth_level auth_level,
3740                                                    const char *domain,
3741                                                    const char *username,
3742                                                    const char *password,
3743                                                    struct rpc_pipe_client **presult)
3744 {
3745         struct rpc_pipe_client *result;
3746         struct cli_pipe_auth_data *auth;
3747         NTSTATUS status;
3748 
3749         status = cli_rpc_pipe_open(cli, transport, interface, &result);
3750         if (!NT_STATUS_IS_OK(status)) {
3751                 return status;
3752         }
3753 
3754         status = rpccli_ntlmssp_bind_data(
3755                 result, auth_type, auth_level, domain, username,
3756                 password, &auth);
3757         if (!NT_STATUS_IS_OK(status)) {
3758                 DEBUG(0, ("rpccli_ntlmssp_bind_data returned %s\n",
3759                           nt_errstr(status)));
3760                 goto err;
3761         }
3762 
3763         status = rpc_pipe_bind(result, auth);
3764         if (!NT_STATUS_IS_OK(status)) {
3765                 DEBUG(0, ("cli_rpc_pipe_open_ntlmssp_internal: cli_rpc_pipe_bind failed with error %s\n",
3766                         nt_errstr(status) ));
3767                 goto err;
3768         }
3769 
3770         DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
3771                 "machine %s and bound NTLMSSP as user %s\\%s.\n",
3772                   get_pipe_name_from_iface(interface), cli->desthost, domain,
3773                   username ));
3774 
3775         *presult = result;
3776         return NT_STATUS_OK;
3777 
3778   err:
3779 
3780         TALLOC_FREE(result);
3781         return status;
3782 }
3783 
3784 /****************************************************************************
3785  External interface.
3786  Open a named pipe to an SMB server and bind using NTLMSSP (bind type 10)
3787  ****************************************************************************/
3788 
3789 NTSTATUS cli_rpc_pipe_open_ntlmssp(struct cli_state *cli,
3790                                    const struct ndr_syntax_id *interface,
3791                                    enum dcerpc_transport_t transport,
3792                                    enum pipe_auth_level auth_level,
3793                                    const char *domain,
3794                                    const char *username,
3795                                    const char *password,
3796                                    struct rpc_pipe_client **presult)
3797 {
3798         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3799                                                 interface,
3800                                                 transport,
3801                                                 PIPE_AUTH_TYPE_NTLMSSP,
3802                                                 auth_level,
3803                                                 domain,
3804                                                 username,
3805                                                 password,
3806                                                 presult);
3807 }
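
     /*
      * Illustrative sketch only: open the samr pipe over ncacn_np and bind it
      * with NTLMSSP at privacy (sign+seal) level.  The interface table and
      * the credentials are assumptions; the spnego variant below takes the
      * same arguments.
      */
     #if 0
     static NTSTATUS example_open_samr_ntlmssp(struct cli_state *cli,
                                               struct rpc_pipe_client **presult)
     {
             return cli_rpc_pipe_open_ntlmssp(cli,
                                              &ndr_table_samr.syntax_id,
                                              NCACN_NP,
                                              PIPE_AUTH_LEVEL_PRIVACY,
                                              "EXAMPLEDOM",  /* assumed domain */
                                              "exampleuser", /* assumed user */
                                              "examplepass", /* assumed password */
                                              presult);
     }
     #endif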
3808 
3809 /****************************************************************************
3810  External interface.
3811  Open a named pipe to an SMB server and bind using spnego NTLMSSP (bind type 9)
3812  ****************************************************************************/
3813 
3814 NTSTATUS cli_rpc_pipe_open_spnego_ntlmssp(struct cli_state *cli,
3815                                           const struct ndr_syntax_id *interface,
3816                                           enum dcerpc_transport_t transport,
3817                                           enum pipe_auth_level auth_level,
3818                                           const char *domain,
3819                                           const char *username,
3820                                           const char *password,
3821                                           struct rpc_pipe_client **presult)
3822 {
3823         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3824                                                 interface,
3825                                                 transport,
3826                                                 PIPE_AUTH_TYPE_SPNEGO_NTLMSSP,
3827                                                 auth_level,
3828                                                 domain,
3829                                                 username,
3830                                                 password,
3831                                                 presult);
3832 }
3833 
3834 /****************************************************************************
3835   Get the schannel session key out of an already opened netlogon pipe.
3836  ****************************************************************************/
3837 static NTSTATUS get_schannel_session_key_common(struct rpc_pipe_client *netlogon_pipe,
3838                                                 struct cli_state *cli,
3839                                                 const char *domain,
3840                                                 uint32 *pneg_flags)
3841 {
3842         uint32 sec_chan_type = 0;
3843         unsigned char machine_pwd[16];
3844         const char *machine_account;
3845         NTSTATUS status;
3846 
3847         /* Get the machine account credentials from secrets.tdb. */
3848         if (!get_trust_pw_hash(domain, machine_pwd, &machine_account,
3849                                &sec_chan_type))
3850         {
3851                 DEBUG(0, ("get_schannel_session_key_common: could not fetch "
3852                         "trust account password for domain '%s'\n",
3853                         domain));
3854                 return NT_STATUS_CANT_ACCESS_DOMAIN_INFO;
3855         }
3856 
3857         status = rpccli_netlogon_setup_creds(netlogon_pipe,
3858                                         cli->desthost, /* server name */
3859                                         domain,        /* domain */
3860                                         global_myname(), /* client name */
3861                                         machine_account, /* machine account name */
3862                                         machine_pwd,
3863                                         sec_chan_type,
3864                                         pneg_flags);
3865 
3866         if (!NT_STATUS_IS_OK(status)) {
3867                 DEBUG(3, ("get_schannel_session_key_common: "
3868                           "rpccli_netlogon_setup_creds failed with result %s "
3869                           "to server %s, domain %s, machine account %s.\n",
3870                           nt_errstr(status), cli->desthost, domain,
3871                           machine_account ));
3872                 return status;
3873         }
3874 
3875         if (((*pneg_flags) & NETLOGON_NEG_SCHANNEL) == 0) {
3876                 DEBUG(3, ("get_schannel_session_key_common: Server %s did not offer schannel\n",
3877                         cli->desthost));
3878                 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3879         }
3880 
3881         return NT_STATUS_OK;
3882 }
3883 
3884 /****************************************************************************
3885  Open a netlogon pipe and get the schannel session key.
3886  Now exposed to external callers.
3887  ****************************************************************************/
3888 
3889 
3890 NTSTATUS get_schannel_session_key(struct cli_state *cli,
3891                                   const char *domain,
3892                                   uint32 *pneg_flags,
3893                                   struct rpc_pipe_client **presult)
3894 {
3895         struct rpc_pipe_client *netlogon_pipe = NULL;
3896         NTSTATUS status;
3897 
3898         status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
3899                                           &netlogon_pipe);
3900         if (!NT_STATUS_IS_OK(status)) {
3901                 return status;
3902         }
3903 
3904         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3905                                                  pneg_flags);
3906         if (!NT_STATUS_IS_OK(status)) {
3907                 TALLOC_FREE(netlogon_pipe);
3908                 return status;
3909         }
3910 
3911         *presult = netlogon_pipe;
3912         return NT_STATUS_OK;
3913 }
3914 
3915 /****************************************************************************
3916  External interface.
3917  Open a named pipe to an SMB server and bind using schannel (bind type 68),
3918  using the supplied session key. Sign and seal.
3919  ****************************************************************************/
3920 
3921 NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
3922                                              const struct ndr_syntax_id *interface,
3923                                              enum dcerpc_transport_t transport,
3924                                              enum pipe_auth_level auth_level,
3925                                              const char *domain,
3926                                              const struct dcinfo *pdc,
3927                                              struct rpc_pipe_client **presult)
3928 {
3929         struct rpc_pipe_client *result;
3930         struct cli_pipe_auth_data *auth;
3931         NTSTATUS status;
3932 
3933         status = cli_rpc_pipe_open(cli, transport, interface, &result);
3934         if (!NT_STATUS_IS_OK(status)) {
3935                 return status;
3936         }
3937 
3938         status = rpccli_schannel_bind_data(result, domain, auth_level,
3939                                            pdc->sess_key, &auth);
3940         if (!NT_STATUS_IS_OK(status)) {
3941                 DEBUG(0, ("rpccli_schannel_bind_data returned %s\n",
3942                           nt_errstr(status)));
3943                 TALLOC_FREE(result);
3944                 return status;
3945         }
3946 
3947         status = rpc_pipe_bind(result, auth);
3948         if (!NT_STATUS_IS_OK(status)) {
3949                 DEBUG(0, ("cli_rpc_pipe_open_schannel_with_key: "
3950                           "rpc_pipe_bind failed with error %s\n",
3951                           nt_errstr(status) ));
3952                 TALLOC_FREE(result);
3953                 return status;
3954         }
3955 
3956         /*
3957          * The credentials on a new netlogon pipe are the ones we are passed
3958          * in - copy them over.
3959          */
3960         result->dc = (struct dcinfo *)talloc_memdup(result, pdc, sizeof(*pdc));
3961         if (result->dc == NULL) {
3962                 DEBUG(0, ("talloc failed\n"));
3963                 TALLOC_FREE(result);
3964                 return NT_STATUS_NO_MEMORY;
3965         }
3966 
3967         DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
3968                   "for domain %s and bound using schannel.\n",
3969                   get_pipe_name_from_iface(interface),
3970                   cli->desthost, domain ));
3971 
3972         *presult = result;
3973         return NT_STATUS_OK;
3974 }
3975 
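/*
 * Illustrative sketch (not part of the original source): a caller that
 * already holds schannel credentials in a struct dcinfo (for instance the
 * ->dc member of a bound netlogon pipe) can hand them straight to
 * cli_rpc_pipe_open_schannel_with_key() to open a further protected
 * connection without renegotiating. The helpers below wrap exactly this
 * pattern; names used here are assumptions for the example.
 */
#if 0
static NTSTATUS example_reuse_schannel_creds(struct cli_state *cli,
                                             const char *domain,
                                             const struct dcinfo *dc,
                                             struct rpc_pipe_client **ppipe)
{
        return cli_rpc_pipe_open_schannel_with_key(cli,
                                                   &ndr_table_netlogon.syntax_id,
                                                   NCACN_NP,
                                                   PIPE_AUTH_LEVEL_PRIVACY,
                                                   domain,
                                                   dc,
                                                   ppipe);
}
#endif
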
3976 /****************************************************************************
3977  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3978  Fetch the session key ourselves using a temporary netlogon pipe. This
3979  version uses an NTLMSSP-authenticated netlogon pipe to get the key.
3980  ****************************************************************************/
3981 
3982 static NTSTATUS get_schannel_session_key_auth_ntlmssp(struct cli_state *cli,
3983                                                       const char *domain,
3984                                                       const char *username,
3985                                                       const char *password,
3986                                                       uint32 *pneg_flags,
3987                                                       struct rpc_pipe_client **presult)
3988 {
3989         struct rpc_pipe_client *netlogon_pipe = NULL;
3990         NTSTATUS status;
3991 
3992         status = cli_rpc_pipe_open_spnego_ntlmssp(
3993                 cli, &ndr_table_netlogon.syntax_id, NCACN_NP,
3994                 PIPE_AUTH_LEVEL_PRIVACY,
3995                 domain, username, password, &netlogon_pipe);
3996         if (!NT_STATUS_IS_OK(status)) {
3997                 return status;
3998         }
3999 
4000         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
4001                                                  pneg_flags);
4002         if (!NT_STATUS_IS_OK(status)) {
4003                 TALLOC_FREE(netlogon_pipe);
4004                 return status;
4005         }
4006 
4007         *presult = netlogon_pipe;
4008         return NT_STATUS_OK;
4009 }
4010 
4011 /****************************************************************************
4012  Open a named pipe to an SMB server and bind using schannel (bind type 68).
4013  Fetch the session key ourselves using a temporary netlogon pipe. This version
4014  uses an ntlmssp bind to get the session key.
4015  ****************************************************************************/
4016 
4017 NTSTATUS cli_rpc_pipe_open_ntlmssp_auth_schannel(struct cli_state *cli,
4018                                                  const struct ndr_syntax_id *interface,
4019                                                  enum dcerpc_transport_t transport,
4020                                                  enum pipe_auth_level auth_level,
4021                                                  const char *domain,
4022                                                  const char *username,
4023                                                  const char *password,
4024                                                  struct rpc_pipe_client **presult)
4025 {
4026         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4027         struct rpc_pipe_client *netlogon_pipe = NULL;
4028         struct rpc_pipe_client *result = NULL;
4029         NTSTATUS status;
4030 
4031         status = get_schannel_session_key_auth_ntlmssp(
4032                 cli, domain, username, password, &neg_flags, &netlogon_pipe);
4033         if (!NT_STATUS_IS_OK(status)) {
4034                 DEBUG(0,("cli_rpc_pipe_open_ntlmssp_auth_schannel: failed to get schannel session "
4035                         "key from server %s for domain %s.\n",
4036                         cli->desthost, domain ));
4037                 return status;
4038         }
4039 
4040         status = cli_rpc_pipe_open_schannel_with_key(
4041                 cli, interface, transport, auth_level, domain, netlogon_pipe->dc,
4042                 &result);
4043 
4044         /* Now that we've bound using the session key we can close the netlogon pipe. */
4045         TALLOC_FREE(netlogon_pipe);
4046 
4047         if (NT_STATUS_IS_OK(status)) {
4048                 *presult = result;
4049         }
4050         return status;
4051 }
4052 
4053 /****************************************************************************
4054  Open a named pipe to an SMB server and bind using schannel (bind type 68).
4055  Fetch the session key ourselves using a temporary netlogon pipe.
4056  ****************************************************************************/
4057 
4058 NTSTATUS cli_rpc_pipe_open_schannel(struct cli_state *cli,
4059                                     const struct ndr_syntax_id *interface,
4060                                     enum dcerpc_transport_t transport,
4061                                     enum pipe_auth_level auth_level,
4062                                     const char *domain,
4063                                     struct rpc_pipe_client **presult)
4064 {
4065         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4066         struct rpc_pipe_client *netlogon_pipe = NULL;
4067         struct rpc_pipe_client *result = NULL;
4068         NTSTATUS status;
4069 
4070         status = get_schannel_session_key(cli, domain, &neg_flags,
4071                                           &netlogon_pipe);
4072         if (!NT_STATUS_IS_OK(status)) {
4073                 DEBUG(0,("cli_rpc_pipe_open_schannel: failed to get schannel session "
4074                         "key from server %s for domain %s.\n",
4075                         cli->desthost, domain ));
4076                 return status;
4077         }
4078 
4079         status = cli_rpc_pipe_open_schannel_with_key(
4080                 cli, interface, transport, auth_level, domain, netlogon_pipe->dc,
4081                 &result);
4082 
4083         /* Now we've bound using the session key we can close the netlog pipe. */
4084         TALLOC_FREE(netlogon_pipe);
4085 
4086         if (NT_STATUS_IS_OK(status)) {
4087                 *presult = result;
4088         }
4089 
4090         return status;
4091 }
4092 
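/*
 * Illustrative sketch (not part of the original source): the one-call form
 * a caller would typically use, letting the helper above negotiate the
 * schannel key on a temporary netlogon pipe internally. The domain string
 * is a placeholder assumed for the example.
 */
#if 0
static NTSTATUS example_open_schannel_netlogon(struct cli_state *cli,
                                               struct rpc_pipe_client **ppipe)
{
        return cli_rpc_pipe_open_schannel(cli,
                                          &ndr_table_netlogon.syntax_id,
                                          NCACN_NP,
                                          PIPE_AUTH_LEVEL_PRIVACY,
                                          "EXAMPLEDOM",  /* domain */
                                          ppipe);
}
#endif
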
4093 /****************************************************************************
4094  Open a named pipe to an SMB server and bind using krb5 (bind type 16).
4095  The idea is that this can be called with service_princ, username and password all
4096  NULL so long as the caller has a TGT.
4097  ****************************************************************************/
4098 
4099 NTSTATUS cli_rpc_pipe_open_krb5(struct cli_state *cli,
4100                                 const struct ndr_syntax_id *interface,
4101                                 enum pipe_auth_level auth_level,
4102                                 const char *service_princ,
4103                                 const char *username,
4104                                 const char *password,
4105                                 struct rpc_pipe_client **presult)
4106 {
4107 #ifdef HAVE_KRB5
4108         struct rpc_pipe_client *result;
4109         struct cli_pipe_auth_data *auth;
4110         NTSTATUS status;
4111 
4112         status = cli_rpc_pipe_open(cli, NCACN_NP, interface, &result);
4113         if (!NT_STATUS_IS_OK(status)) {
4114                 return status;
4115         }
4116 
4117         status = rpccli_kerberos_bind_data(result, auth_level, service_princ,
4118                                            username, password, &auth);
4119         if (!NT_STATUS_IS_OK(status)) {
4120                 DEBUG(0, ("rpccli_kerberos_bind_data returned %s\n",
4121                           nt_errstr(status)));
4122                 TALLOC_FREE(result);
4123                 return status;
4124         }
4125 
4126         status = rpc_pipe_bind(result, auth);
4127         if (!NT_STATUS_IS_OK(status)) {
4128                 DEBUG(0, ("cli_rpc_pipe_open_krb5: rpc_pipe_bind failed "
4129                           "with error %s\n", nt_errstr(status)));
4130                 TALLOC_FREE(result);
4131                 return status;
4132         }
4133 
4134         *presult = result;
4135         return NT_STATUS_OK;
4136 #else
4137         DEBUG(0,("cli_rpc_pipe_open_krb5: Kerberos support not compiled in.\n"));
4138         return NT_STATUS_NOT_IMPLEMENTED;
4139 #endif
4140 }
4141 
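/*
 * Illustrative sketch (not part of the original source): as the comment
 * above notes, service_princ, username and password may all be NULL when
 * the caller already holds a TGT, so a credential-cache based bind can
 * look like this. ndr_table_lsarpc is a placeholder interface assumed for
 * the example.
 */
#if 0
static NTSTATUS example_open_lsa_krb5(struct cli_state *cli,
                                      struct rpc_pipe_client **ppipe)
{
        return cli_rpc_pipe_open_krb5(cli,
                                      &ndr_table_lsarpc.syntax_id,
                                      PIPE_AUTH_LEVEL_PRIVACY,
                                      NULL,   /* service_princ: use default */
                                      NULL,   /* username: rely on existing TGT */
                                      NULL,   /* password */
                                      ppipe);
}
#endif
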
4142 NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
4143                              struct rpc_pipe_client *cli,
4144                              DATA_BLOB *session_key)
4145 {
4146         if (!session_key || !cli) {
4147                 return NT_STATUS_INVALID_PARAMETER;
4148         }
4149 
4150         if (!cli->auth) {
4151                 return NT_STATUS_INVALID_PARAMETER;
4152         }
4153 
4154         switch (cli->auth->auth_type) {
4155                 case PIPE_AUTH_TYPE_SCHANNEL:
4156                         *session_key = data_blob_talloc(mem_ctx,
4157                                 cli->auth->a_u.schannel_auth->sess_key, 16);
4158                         break;
4159                 case PIPE_AUTH_TYPE_NTLMSSP:
4160                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
4161                         *session_key = data_blob_talloc(mem_ctx,
4162                                 cli->auth->a_u.ntlmssp_state->session_key.data,
4163                                 cli->auth->a_u.ntlmssp_state->session_key.length);
4164                         break;
4165                 case PIPE_AUTH_TYPE_KRB5:
4166                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
4167                         *session_key = data_blob_talloc(mem_ctx,
4168                                 cli->auth->a_u.kerberos_auth->session_key.data,
4169                                 cli->auth->a_u.kerberos_auth->session_key.length);
4170                         break;
4171                 case PIPE_AUTH_TYPE_NONE:
4172                         *session_key = data_blob_talloc(mem_ctx,
4173                                 cli->auth->user_session_key.data,
4174                                 cli->auth->user_session_key.length);
4175                         break;
4176                 default:
4177                         return NT_STATUS_NO_USER_SESSION_KEY;
4178         }
4179 
4180         return NT_STATUS_OK;
4181 }
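
/*
 * Illustrative sketch (not part of the original source): fetching the
 * session key of an authenticated pipe, as callers do when they need the
 * key to protect secrets carried inside RPC calls. The mem_ctx and
 * pipe_cli names are assumptions for the example.
 */
#if 0
static NTSTATUS example_fetch_session_key(TALLOC_CTX *mem_ctx,
                                          struct rpc_pipe_client *pipe_cli)
{
        DATA_BLOB session_key;
        NTSTATUS status;

        status = cli_get_session_key(mem_ctx, pipe_cli, &session_key);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        /* ... use session_key.data / session_key.length ... */
        return NT_STATUS_OK;
}
#endif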
