Line data Source code
1 : /*
2 : * RPC host
3 : *
4 : * Implements samba-dcerpcd service.
5 : *
6 : * This program is free software; you can redistribute it and/or modify
7 : * it under the terms of the GNU General Public License as published by
8 : * the Free Software Foundation; either version 3 of the License, or
9 : * (at your option) any later version.
10 : *
11 : * This program is distributed in the hope that it will be useful,
12 : * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 : * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 : * GNU General Public License for more details.
15 : *
16 : * You should have received a copy of the GNU General Public License
17 : * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 : */
19 :
20 : /*
21 : * This binary has two usage modes:
22 : *
23 : * In the normal case when invoked from smbd or winbind it is given a
24 : * directory to scan via --libexec-rpcds and will invoke on demand any
25 : * binaries it finds there starting with rpcd_ when a named pipe
26 : * connection is requested.
27 : *
28 : * In the second mode it can be started explicitly from system startup
29 : * scripts.
30 : *
31 : * When Samba is set up as an Active Directory Domain Controller the
32 : * normal samba binary overrides and provides DCERPC services, whilst
33 : * allowing samba-dcerpcd to provide the services that smbd used to
34 : * provide in that set-up, such as SRVSVC.
35 : *
36 : * The second mode can also be useful for use outside of the Samba framework,
37 : * for example, use with the Linux kernel SMB2 server ksmbd. In this mode
38 : * it behaves like inetd and listens on sockets on behalf of RPC server
39 : * implementations.
40 : */
41 :
42 : #include "replace.h"
43 : #include <fnmatch.h>
44 : #include "lib/cmdline/cmdline.h"
45 : #include "lib/cmdline/closefrom_except.h"
46 : #include "source3/include/includes.h"
47 : #include "source3/include/auth.h"
48 : #include "rpc_sock_helper.h"
49 : #include "messages.h"
50 : #include "lib/util_file.h"
51 : #include "lib/util/tevent_unix.h"
52 : #include "lib/util/tevent_ntstatus.h"
53 : #include "lib/util/smb_strtox.h"
54 : #include "lib/util/debug.h"
55 : #include "lib/util/server_id.h"
56 : #include "lib/util/util_tdb.h"
57 : #include "lib/tdb_wrap/tdb_wrap.h"
58 : #include "lib/async_req/async_sock.h"
59 : #include "librpc/rpc/dcerpc_util.h"
60 : #include "lib/tsocket/tsocket.h"
61 : #include "libcli/named_pipe_auth/npa_tstream.h"
62 : #include "librpc/gen_ndr/ndr_rpc_host.h"
63 : #include "source3/param/loadparm.h"
64 : #include "source3/lib/global_contexts.h"
65 : #include "lib/util/strv.h"
66 : #include "lib/util/pidfile.h"
67 : #include "source3/rpc_client/cli_pipe.h"
68 : #include "librpc/gen_ndr/ndr_epmapper.h"
69 : #include "librpc/gen_ndr/ndr_epmapper_c.h"
70 : #include "nsswitch/winbind_client.h"
71 : #include "libcli/security/dom_sid.h"
72 : #include "libcli/security/security_token.h"
73 :
74 : extern bool override_logfile;
75 :
76 : struct rpc_server;
77 : struct rpc_work_process;
78 :
79 : /*
80 : * samba-dcerpcd state to keep track of rpcd_* servers.
81 : */
struct rpc_host {
	struct messaging_context *msg_ctx;
	/* One entry per rpcd_* executable found */
	struct rpc_server **servers;
	/* Endpoint mapper database, filled from the rpcds' interfaces */
	struct tdb_wrap *epmdb;

	/*
	 * Pipe handed to worker processes as stdin.
	 * NOTE(review): presumably used to detect parent exit — confirm
	 * against the worker spawn/exec code.
	 */
	int worker_stdin[2];

	/* True when started with --np-helper (serving named pipes) */
	bool np_helper;

	/*
	 * If we're started with --np-helper but nobody contacts us,
	 * we need to exit after a while. This will be deleted once
	 * the first real client connects and our self-exit mechanism
	 * when we don't have any worker processes left kicks in.
	 */
	struct tevent_timer *np_helper_shutdown;
};
99 :
100 : /*
101 : * Map a RPC interface to a name. Used when filling the endpoint
102 : * mapper database
103 : */
struct rpc_host_iface_name {
	/* Interface UUID plus version */
	struct ndr_syntax_id iface;
	/* Human-readable annotation, e.g. "winreg" */
	char *name;
};
108 :
109 : /*
110 : * rpc_host representation for listening sockets. ncacn_ip_tcp might
111 : * listen on multiple explicit IPs, all with the same port.
112 : */
struct rpc_host_endpoint {
	/* Back-pointer to the rpcd_* server handling this endpoint */
	struct rpc_server *server;
	/* Parsed binding string (transport plus endpoint option) */
	struct dcerpc_binding *binding;
	/* Unique list of interfaces served on this endpoint */
	struct ndr_syntax_id *interfaces;
	/* Listening sockets; multiple fds for multi-IP ncacn_ip_tcp */
	int *fds;
	size_t num_fds;
};
120 :
121 : /*
122 : * Staging area until we sent the socket plus bind to the helper
123 : */
struct rpc_host_pending_client {
	/* Linked into rpc_server->pending_clients */
	struct rpc_host_pending_client *prev, *next;

	/*
	 * Pointer for the destructor to remove us from the list of
	 * pending clients
	 */
	struct rpc_server *server;

	/*
	 * Waiter for client exit before a helper accepted the request
	 */
	struct tevent_req *hangup_wait;

	/*
	 * Info to pick the worker
	 */
	struct ncacn_packet *bind_pkt;

	/*
	 * This is what we send down to the worker
	 */
	int sock;
	struct rpc_host_client *client;
};
149 :
150 : /*
151 : * Representation of one worker process. For each rpcd_* executable
152 : * there will be more of than one of these.
153 : */
struct rpc_work_process {
	/* Process id of the forked rpcd_* worker */
	pid_t pid;

	/*
	 * !available means:
	 *
	 * Worker forked but did not send its initial status yet (not
	 * yet initialized)
	 *
	 * Worker died, but we did not receive SIGCHLD yet. We noticed
	 * it because we couldn't send it a message.
	 */
	bool available;

	/*
	 * Incremented by us when sending a client, decremented by
	 * MSG_RPC_HOST_WORKER_STATUS sent by workers whenever a
	 * client exits.
	 */
	uint32_t num_clients;

	/*
	 * Send SHUTDOWN to an idle child after a while
	 */
	struct tevent_timer *exit_timer;
};
180 :
181 : /*
182 : * State for a set of running instances of an rpcd_* server executable
183 : */
struct rpc_server {
	/* Backlink to the owning samba-dcerpcd state */
	struct rpc_host *host;
	/*
	 * Index into the rpc_host_state->servers array
	 */
	uint32_t server_index;

	/* Path of the rpcd_* executable we spawn */
	const char *rpc_server_exe;

	/* Endpoints and interface names from --list-interfaces */
	struct rpc_host_endpoint **endpoints;
	struct rpc_host_iface_name *iface_names;

	/* Worker-pool tuning, also reported via --list-interfaces */
	size_t max_workers;
	size_t idle_seconds;

	/*
	 * "workers" can be larger than "max_workers": Internal
	 * connections require an idle worker to avoid deadlocks
	 * between RPC servers: netlogon requires samr, everybody
	 * requires winreg. And if a deep call in netlogon asks for a
	 * samr connection, this must never end up in the same
	 * process. named_pipe_auth_req_info7->need_idle_server is set
	 * in those cases.
	 */
	struct rpc_work_process *workers;

	/* Clients waiting for a worker to pick them up */
	struct rpc_host_pending_client *pending_clients;
};
212 :
struct rpc_server_get_endpoints_state {
	/* argv for "rpcd_foo --list-interfaces" */
	char **argl;
	/* Default ncalrpc socket name: basename of the rpcd exe */
	char *ncalrpc_endpoint;
	/* Serve only this transport; NCA_UNKNOWN means "serve all" */
	enum dcerpc_transport_t only_transport;

	/* Parsed --list-interfaces output */
	struct rpc_host_iface_name *iface_names;
	struct rpc_host_endpoint **endpoints;

	/* First two output lines: worker count and idle timeout */
	unsigned long num_workers;
	unsigned long idle_seconds;
};
224 :
225 : static void rpc_server_get_endpoints_done(struct tevent_req *subreq);
226 :
227 : /**
228 : * @brief Query interfaces from an rpcd helper
229 : *
230 : * Spawn a rpcd helper, ask it for the interfaces it serves via
231 : * --list-interfaces, parse the output
232 : *
233 : * @param[in] mem_ctx Memory context for the tevent_req
234 : * @param[in] ev Event context to run this on
235 : * @param[in] rpc_server_exe Binary to ask with --list-interfaces
236 : * @param[in] only_transport Filter out anything but this
237 : * @return The tevent_req representing this process
238 : */
239 :
240 272 : static struct tevent_req *rpc_server_get_endpoints_send(
241 : TALLOC_CTX *mem_ctx,
242 : struct tevent_context *ev,
243 : const char *rpc_server_exe,
244 : enum dcerpc_transport_t only_transport)
245 : {
246 272 : struct tevent_req *req = NULL, *subreq = NULL;
247 272 : struct rpc_server_get_endpoints_state *state = NULL;
248 272 : const char *progname = NULL;
249 :
250 272 : req = tevent_req_create(
251 : mem_ctx, &state, struct rpc_server_get_endpoints_state);
252 272 : if (req == NULL) {
253 0 : return NULL;
254 : }
255 272 : state->only_transport = only_transport;
256 :
257 272 : progname = strrchr(rpc_server_exe, '/');
258 272 : if (progname != NULL) {
259 272 : progname += 1;
260 : } else {
261 0 : progname = rpc_server_exe;
262 : }
263 :
264 272 : state->ncalrpc_endpoint = talloc_strdup(state, progname);
265 272 : if (tevent_req_nomem(state->ncalrpc_endpoint, req)) {
266 0 : return tevent_req_post(req, ev);
267 : }
268 :
269 272 : state->argl = talloc_array(state, char *, 4);
270 272 : if (tevent_req_nomem(state->argl, req)) {
271 0 : return tevent_req_post(req, ev);
272 : }
273 :
274 272 : state->argl = str_list_make_empty(state);
275 272 : str_list_add_printf(&state->argl, "%s", rpc_server_exe);
276 272 : str_list_add_printf(&state->argl, "--list-interfaces");
277 456 : str_list_add_printf(
278 272 : &state->argl, "--configfile=%s", get_dyn_CONFIGFILE());
279 :
280 272 : if (tevent_req_nomem(state->argl, req)) {
281 0 : return tevent_req_post(req, ev);
282 : }
283 :
284 272 : subreq = file_ploadv_send(state, ev, state->argl, 65536);
285 272 : if (tevent_req_nomem(subreq, req)) {
286 0 : return tevent_req_post(req, ev);
287 : }
288 272 : tevent_req_set_callback(subreq, rpc_server_get_endpoints_done, req);
289 272 : return req;
290 : }
291 :
292 : /*
293 : * Parse a line of format
294 : *
295 : * 338cd001-2244-31f1-aaaa-900038001003/0x00000001 winreg
296 : *
297 : * and add it to the "piface_names" array.
298 : */
299 :
300 518 : static struct rpc_host_iface_name *rpc_exe_parse_iface_line(
301 : TALLOC_CTX *mem_ctx,
302 : struct rpc_host_iface_name **piface_names,
303 : const char *line)
304 : {
305 518 : struct rpc_host_iface_name *iface_names = *piface_names;
306 518 : struct rpc_host_iface_name *tmp = NULL, *result = NULL;
307 518 : size_t i, num_ifaces = talloc_array_length(iface_names);
308 : struct ndr_syntax_id iface;
309 518 : char *name = NULL;
310 : bool ok;
311 :
312 518 : ok = ndr_syntax_id_from_string(line, &iface);
313 518 : if (!ok) {
314 0 : DBG_WARNING("ndr_syntax_id_from_string() failed for: [%s]\n",
315 : line);
316 0 : return NULL;
317 : }
318 :
319 518 : name = strchr(line, ' ');
320 518 : if (name == NULL) {
321 0 : return NULL;
322 : }
323 518 : name += 1;
324 :
325 1304 : for (i=0; i<num_ifaces; i++) {
326 786 : result = &iface_names[i];
327 :
328 786 : if (ndr_syntax_id_equal(&result->iface, &iface)) {
329 0 : return result;
330 : }
331 : }
332 :
333 518 : if (num_ifaces + 1 < num_ifaces) {
334 0 : return NULL;
335 : }
336 :
337 518 : name = talloc_strdup(mem_ctx, name);
338 518 : if (name == NULL) {
339 0 : return NULL;
340 : }
341 :
342 518 : tmp = talloc_realloc(
343 : mem_ctx,
344 : iface_names,
345 : struct rpc_host_iface_name,
346 : num_ifaces + 1);
347 518 : if (tmp == NULL) {
348 0 : TALLOC_FREE(name);
349 0 : return NULL;
350 : }
351 518 : iface_names = tmp;
352 :
353 518 : result = &iface_names[num_ifaces];
354 :
355 518 : *result = (struct rpc_host_iface_name) {
356 : .iface = iface,
357 518 : .name = talloc_move(iface_names, &name),
358 : };
359 :
360 518 : *piface_names = iface_names;
361 :
362 518 : return result;
363 : }
364 :
365 518 : static struct rpc_host_iface_name *rpc_host_iface_names_find(
366 : struct rpc_host_iface_name *iface_names,
367 : const struct ndr_syntax_id *iface)
368 : {
369 518 : size_t i, num_iface_names = talloc_array_length(iface_names);
370 :
371 1304 : for (i=0; i<num_iface_names; i++) {
372 1304 : struct rpc_host_iface_name *iface_name = &iface_names[i];
373 :
374 1304 : if (ndr_syntax_id_equal(iface, &iface_name->iface)) {
375 518 : return iface_name;
376 : }
377 : }
378 :
379 0 : return NULL;
380 : }
381 :
382 1648 : static bool dcerpc_binding_same_endpoint(
383 : const struct dcerpc_binding *b1, const struct dcerpc_binding *b2)
384 : {
385 1648 : enum dcerpc_transport_t t1 = dcerpc_binding_get_transport(b1);
386 1648 : enum dcerpc_transport_t t2 = dcerpc_binding_get_transport(b2);
387 1648 : const char *e1 = NULL, *e2 = NULL;
388 : int cmp;
389 :
390 1648 : if (t1 != t2) {
391 462 : return false;
392 : }
393 :
394 1186 : e1 = dcerpc_binding_get_string_option(b1, "endpoint");
395 1186 : e2 = dcerpc_binding_get_string_option(b2, "endpoint");
396 :
397 1186 : if ((e1 == NULL) && (e2 == NULL)) {
398 34 : return true;
399 : }
400 1152 : if ((e1 == NULL) || (e2 == NULL)) {
401 0 : return false;
402 : }
403 1152 : cmp = strcmp(e1, e2);
404 1152 : return (cmp == 0);
405 : }
406 :
407 : /**
408 : * @brief Filter whether we want to serve an endpoint
409 : *
410 : * samba-dcerpcd might want to serve all endpoints a rpcd reported to
411 : * us via --list-interfaces.
412 : *
413 : * In member mode, we only serve named pipes. Indicated by NCACN_NP
414 : * passed in via "only_transport".
415 : *
416 : * @param[in] binding Which binding is in question?
417 : * @param[in] only_transport Exclusive transport to serve
418 : * @return Do we want to serve "binding" from samba-dcerpcd?
419 : */
420 :
421 1312 : static bool rpc_host_serve_endpoint(
422 : struct dcerpc_binding *binding,
423 : enum dcerpc_transport_t only_transport)
424 : {
425 861 : enum dcerpc_transport_t transport =
426 451 : dcerpc_binding_get_transport(binding);
427 :
428 1312 : if (only_transport == NCA_UNKNOWN) {
429 : /* no filter around */
430 334 : return true;
431 : }
432 :
433 978 : if (transport != only_transport) {
434 : /* filter out */
435 520 : return false;
436 : }
437 :
438 458 : return true;
439 : }
440 :
/*
 * Parse "binding_string" and return the matching endpoint in
 * state->endpoints, creating a new entry if none matches yet.
 * Returns NULL both on error and when the endpoint is filtered
 * out by state->only_transport.
 */
static struct rpc_host_endpoint *rpc_host_endpoint_find(
	struct rpc_server_get_endpoints_state *state,
	const char *binding_string)
{
	size_t i, num_endpoints = talloc_array_length(state->endpoints);
	struct rpc_host_endpoint **tmp = NULL, *ep = NULL;
	enum dcerpc_transport_t transport;
	NTSTATUS status;
	bool serve_this;

	ep = talloc_zero(state, struct rpc_host_endpoint);
	if (ep == NULL) {
		goto fail;
	}

	status = dcerpc_parse_binding(ep, binding_string, &ep->binding);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dcerpc_parse_binding(%s) failed: %s\n",
			  binding_string,
			  nt_errstr(status));
		goto fail;
	}

	/* Apply the member-mode transport filter */
	serve_this = rpc_host_serve_endpoint(
		ep->binding, state->only_transport);
	if (!serve_this) {
		goto fail;
	}

	transport = dcerpc_binding_get_transport(ep->binding);

	if (transport == NCALRPC) {
		const char *ncalrpc_sock = dcerpc_binding_get_string_option(
			ep->binding, "endpoint");

		if (ncalrpc_sock == NULL) {
			/*
			 * generic ncalrpc:, set program-specific
			 * socket name. epmapper will redirect clients
			 * properly.
			 */
			status = dcerpc_binding_set_string_option(
				ep->binding,
				"endpoint",
				state->ncalrpc_endpoint);
			if (!NT_STATUS_IS_OK(status)) {
				DBG_DEBUG("dcerpc_binding_set_string_option "
					  "failed: %s\n",
					  nt_errstr(status));
				goto fail;
			}
		}
	}

	/* Return an existing endpoint if we already track this one */
	for (i=0; i<num_endpoints; i++) {

		bool ok = dcerpc_binding_same_endpoint(
			ep->binding, state->endpoints[i]->binding);

		if (ok) {
			TALLOC_FREE(ep);
			return state->endpoints[i];
		}
	}

	if (num_endpoints + 1 < num_endpoints) {
		/* overflow */
		goto fail;
	}

	tmp = talloc_realloc(
		state,
		state->endpoints,
		struct rpc_host_endpoint *,
		num_endpoints + 1);
	if (tmp == NULL) {
		goto fail;
	}
	state->endpoints = tmp;
	/* Hand ownership of "ep" over to the endpoints array */
	state->endpoints[num_endpoints] = talloc_move(state->endpoints, &ep);

	return state->endpoints[num_endpoints];
fail:
	TALLOC_FREE(ep);
	return NULL;
}
526 :
527 792 : static bool ndr_interfaces_add_unique(
528 : TALLOC_CTX *mem_ctx,
529 : struct ndr_syntax_id **pifaces,
530 : const struct ndr_syntax_id *iface)
531 : {
532 792 : struct ndr_syntax_id *ifaces = *pifaces;
533 792 : size_t i, num_ifaces = talloc_array_length(ifaces);
534 :
535 984 : for (i=0; i<num_ifaces; i++) {
536 192 : if (ndr_syntax_id_equal(iface, &ifaces[i])) {
537 0 : return true;
538 : }
539 : }
540 :
541 792 : if (num_ifaces + 1 < num_ifaces) {
542 0 : return false;
543 : }
544 792 : ifaces = talloc_realloc(
545 : mem_ctx,
546 : ifaces,
547 : struct ndr_syntax_id,
548 : num_ifaces + 1);
549 792 : if (ifaces == NULL) {
550 0 : return false;
551 : }
552 792 : ifaces[num_ifaces] = *iface;
553 :
554 792 : *pifaces = ifaces;
555 792 : return true;
556 : }
557 :
558 : /*
559 : * Read the text reply from the rpcd_* process telling us what
560 : * endpoints it will serve when asked with --list-interfaces.
561 : */
static void rpc_server_get_endpoints_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_server_get_endpoints_state *state = tevent_req_data(
		req, struct rpc_server_get_endpoints_state);
	struct rpc_host_iface_name *iface = NULL;
	uint8_t *buf = NULL;
	size_t buflen;
	char **lines = NULL;
	int ret, i, num_lines;

	ret = file_ploadv_recv(subreq, state, &buf);
	TALLOC_FREE(subreq);
	if (tevent_req_error(req, ret)) {
		return;
	}

	buflen = talloc_get_size(buf);
	if (buflen == 0) {
		/* Empty output: rpcd serves nothing, not an error */
		tevent_req_done(req);
		return;
	}

	lines = file_lines_parse((char *)buf, buflen, &num_lines, state);
	if (tevent_req_nomem(lines, req)) {
		return;
	}

	/* Lines 0 and 1 are mandatory: num_workers and idle_seconds */
	if (num_lines < 2) {
		DBG_DEBUG("Got %d lines, expected at least 2\n", num_lines);
		tevent_req_error(req, EINVAL);
		return;
	}

	state->num_workers = smb_strtoul(
		lines[0], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
	if (ret != 0) {
		DBG_DEBUG("Could not parse num_workers(%s): %s\n",
			  lines[0],
			  strerror(ret));
		tevent_req_error(req, ret);
		return;
	}

	state->idle_seconds = smb_strtoul(
		lines[1], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
	if (ret != 0) {
		DBG_DEBUG("Could not parse idle_seconds (%s): %s\n",
			  lines[1],
			  strerror(ret));
		tevent_req_error(req, ret);
		return;
	}

	DBG_DEBUG("num_workers=%lu, idle_seconds=%lu for %s\n",
		  state->num_workers,
		  state->idle_seconds,
		  state->argl[0]);

	/*
	 * Remaining lines: a line without leading space introduces an
	 * interface ("<syntax-id> <name>"), indented lines below it
	 * are binding strings served for that interface.
	 */
	for (i=2; i<num_lines; i++) {
		char *line = lines[i];
		struct rpc_host_endpoint *endpoint = NULL;
		bool ok;

		if (line[0] != ' ') {
			iface = rpc_exe_parse_iface_line(
				state, &state->iface_names, line);
			if (iface == NULL) {
				DBG_WARNING(
					"rpc_exe_parse_iface_line failed "
					"for: [%s] from %s\n",
					line,
					state->argl[0]);
				tevent_req_oom(req);
				return;
			}
			continue;
		}

		if (iface == NULL) {
			/* An endpoint line before any interface line */
			DBG_DEBUG("Interface GUID line missing\n");
			tevent_req_error(req, EINVAL);
			return;
		}

		endpoint = rpc_host_endpoint_find(state, line+1);
		if (endpoint == NULL) {
			/* Filtered out by transport or unparsable: skip */
			DBG_DEBUG("rpc_host_endpoint_find for %s failed\n",
				  line+1);
			continue;
		}

		ok = ndr_interfaces_add_unique(
			endpoint,
			&endpoint->interfaces,
			&iface->iface);
		if (!ok) {
			DBG_DEBUG("ndr_interfaces_add_unique failed\n");
			tevent_req_oom(req);
			return;
		}
	}

	tevent_req_done(req);
}
668 :
669 : /**
670 : * @brief Receive output from --list-interfaces
671 : *
672 : * @param[in] req The async req that just finished
673 : * @param[in] mem_ctx Where to put the output on
674 : * @param[out] endpoints The endpoints to be listened on
675 : * @param[out] iface_names Annotation for epm_Lookup's epm_entry_t
676 : * @return 0/errno
677 : */
678 272 : static int rpc_server_get_endpoints_recv(
679 : struct tevent_req *req,
680 : TALLOC_CTX *mem_ctx,
681 : struct rpc_host_endpoint ***endpoints,
682 : struct rpc_host_iface_name **iface_names,
683 : size_t *num_workers,
684 : size_t *idle_seconds)
685 : {
686 272 : struct rpc_server_get_endpoints_state *state = tevent_req_data(
687 : req, struct rpc_server_get_endpoints_state);
688 : int err;
689 :
690 272 : if (tevent_req_is_unix_error(req, &err)) {
691 0 : tevent_req_received(req);
692 0 : return err;
693 : }
694 :
695 272 : *endpoints = talloc_move(mem_ctx, &state->endpoints);
696 272 : *iface_names = talloc_move(mem_ctx, &state->iface_names);
697 272 : *num_workers = state->num_workers;
698 272 : *idle_seconds = state->idle_seconds;
699 272 : tevent_req_received(req);
700 272 : return 0;
701 : }
702 :
703 : /*
704 : * For NCACN_NP we get the named pipe auth info from smbd, if a client
705 : * comes in via TCP or NCALPRC we need to invent it ourselves with
706 : * anonymous session info.
707 : */
708 :
static NTSTATUS rpc_host_generate_npa_info7_from_sock(
	TALLOC_CTX *mem_ctx,
	enum dcerpc_transport_t transport,
	int sock,
	const struct samba_sockaddr *peer_addr,
	struct named_pipe_auth_req_info7 **pinfo7)
{
	struct named_pipe_auth_req_info7 *info7 = NULL;
	struct samba_sockaddr local_addr = {
		.sa_socklen = sizeof(struct sockaddr_storage),
	};
	struct tsocket_address *taddr = NULL;
	char *remote_client_name = NULL;
	char *remote_client_addr = NULL;
	char *local_server_name = NULL;
	char *local_server_addr = NULL;
	char *(*tsocket_address_to_name_fn)(
		const struct tsocket_address *addr,
		TALLOC_CTX *mem_ctx) = NULL;
	NTSTATUS status = NT_STATUS_NO_MEMORY;
	int ret;

	/*
	 * For NCACN_NP we get the npa info from smbd
	 */
	SMB_ASSERT((transport == NCACN_IP_TCP) || (transport == NCALRPC));

	/* TCP peers render as IP strings, ncalrpc peers as unix paths */
	tsocket_address_to_name_fn = (transport == NCACN_IP_TCP) ?
		tsocket_address_inet_addr_string : tsocket_address_unix_path;

	info7 = talloc_zero(mem_ctx, struct named_pipe_auth_req_info7);
	if (info7 == NULL) {
		goto fail;
	}
	info7->session_info =
		talloc_zero(info7, struct auth_session_info_transport);
	if (info7->session_info == NULL) {
		goto fail;
	}

	/* Direct TCP/ncalrpc clients start out anonymous */
	status = make_session_info_anonymous(
		info7->session_info,
		&info7->session_info->session_info);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("make_session_info_anonymous failed: %s\n",
			  nt_errstr(status));
		goto fail;
	}

	ret = tsocket_address_bsd_from_samba_sockaddr(info7,
						      peer_addr,
						      &taddr);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
			  "%s\n",
			  strerror(errno));
		goto fail;
	}
	remote_client_addr = tsocket_address_to_name_fn(taddr, info7);
	if (remote_client_addr == NULL) {
		DBG_DEBUG("tsocket_address_to_name_fn failed\n");
		goto nomem;
	}
	TALLOC_FREE(taddr);

	/* Without reverse lookup the "name" is just the address again */
	remote_client_name = talloc_strdup(info7, remote_client_addr);
	if (remote_client_name == NULL) {
		DBG_DEBUG("talloc_strdup failed\n");
		goto nomem;
	}

	if (transport == NCACN_IP_TCP) {
		bool ok = samba_sockaddr_get_port(peer_addr,
						  &info7->remote_client_port);
		if (!ok) {
			DBG_DEBUG("samba_sockaddr_get_port failed\n");
			status = NT_STATUS_INVALID_PARAMETER;
			goto fail;
		}
	}

	ret = getsockname(sock, &local_addr.u.sa, &local_addr.sa_socklen);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("getsockname failed: %s\n", strerror(errno));
		goto fail;
	}

	ret = tsocket_address_bsd_from_samba_sockaddr(info7,
						      &local_addr,
						      &taddr);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
			  "%s\n",
			  strerror(errno));
		goto fail;
	}
	local_server_addr = tsocket_address_to_name_fn(taddr, info7);
	if (local_server_addr == NULL) {
		DBG_DEBUG("tsocket_address_to_name_fn failed\n");
		goto nomem;
	}
	TALLOC_FREE(taddr);

	local_server_name = talloc_strdup(info7, local_server_addr);
	if (local_server_name == NULL) {
		DBG_DEBUG("talloc_strdup failed\n");
		goto nomem;
	}

	if (transport == NCACN_IP_TCP) {
		bool ok = samba_sockaddr_get_port(&local_addr,
						  &info7->local_server_port);
		if (!ok) {
			DBG_DEBUG("samba_sockaddr_get_port failed\n");
			status = NT_STATUS_INVALID_PARAMETER;
			goto fail;
		}
	}

	if (transport == NCALRPC) {
		uid_t uid;
		gid_t gid;

		ret = getpeereid(sock, &uid, &gid);
		if (ret < 0) {
			status = map_nt_error_from_unix(errno);
			DBG_DEBUG("getpeereid failed: %s\n", strerror(errno));
			goto fail;
		}

		if (uid == sec_initial_uid()) {

			/*
			 * Indicate "root" to gensec
			 */

			TALLOC_FREE(remote_client_addr);
			TALLOC_FREE(remote_client_name);

			ret = tsocket_address_unix_from_path(
				info7,
				AS_SYSTEM_MAGIC_PATH_TOKEN,
				&taddr);
			if (ret == -1) {
				DBG_DEBUG("tsocket_address_unix_from_path "
					  "failed\n");
				goto nomem;
			}

			remote_client_addr =
				tsocket_address_unix_path(taddr, info7);
			if (remote_client_addr == NULL) {
				DBG_DEBUG("tsocket_address_unix_path "
					  "failed\n");
				goto nomem;
			}
			remote_client_name =
				talloc_strdup(info7, remote_client_addr);
			if (remote_client_name == NULL) {
				DBG_DEBUG("talloc_strdup failed\n");
				goto nomem;
			}
		}
	}

	info7->remote_client_addr = remote_client_addr;
	info7->remote_client_name = remote_client_name;
	info7->local_server_addr = local_server_addr;
	info7->local_server_name = local_server_name;

	*pinfo7 = info7;
	return NT_STATUS_OK;

nomem:
	status = NT_STATUS_NO_MEMORY;
fail:
	/* All intermediate allocations hang off info7, one free suffices */
	TALLOC_FREE(info7);
	return status;
}
891 :
struct rpc_host_bind_read_state {
	struct tevent_context *ev;

	/* Client socket; -1 once ownership went back to the caller */
	int sock;
	/* Stream over a dup() of "sock", used to read the bind */
	struct tstream_context *plain;
	/* npa-wrapped stream, NCACN_NP only */
	struct tstream_context *npa_stream;

	/* The client's bind packet, parsed */
	struct ncacn_packet *pkt;
	/* Everything we will hand down to the worker process */
	struct rpc_host_client *client;
};
902 :
903 : static void rpc_host_bind_read_cleanup(
904 : struct tevent_req *req, enum tevent_req_state req_state);
905 : static void rpc_host_bind_read_got_npa(struct tevent_req *subreq);
906 : static void rpc_host_bind_read_got_bind(struct tevent_req *subreq);
907 :
908 : /*
909 : * Wait for a bind packet from a client.
910 : */
static struct tevent_req *rpc_host_bind_read_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	enum dcerpc_transport_t transport,
	int *psock,
	const struct samba_sockaddr *peer_addr)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_host_bind_read_state *state = NULL;
	int rc, sock_dup;
	NTSTATUS status;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_host_bind_read_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;

	/* Take ownership of the socket from the caller */
	state->sock = *psock;
	*psock = -1;

	tevent_req_set_cleanup_fn(req, rpc_host_bind_read_cleanup);

	state->client = talloc_zero(state, struct rpc_host_client);
	if (tevent_req_nomem(state->client, req)) {
		return tevent_req_post(req, ev);
	}

	/*
	 * Dup the socket to read the first RPC packet:
	 * tstream_bsd_existing_socket() takes ownership with
	 * autoclose, but we need to send "sock" down to our worker
	 * process later.
	 */
	sock_dup = dup(state->sock);
	if (sock_dup == -1) {
		tevent_req_error(req, errno);
		return tevent_req_post(req, ev);
	}

	rc = tstream_bsd_existing_socket(state, sock_dup, &state->plain);
	if (rc == -1) {
		DBG_DEBUG("tstream_bsd_existing_socket failed: %s\n",
			  strerror(errno));
		tevent_req_error(req, errno);
		close(sock_dup);
		return tevent_req_post(req, ev);
	}

	if (transport == NCACN_NP) {
		/*
		 * Named pipe: smbd sends us the client info via the
		 * npa handshake before the first RPC pdu.
		 * NOTE(review): 0xff|0x0400|0x0100 presumably encodes
		 * the pipe's device state -- confirm against the
		 * tstream_npa_accept_existing_send() contract.
		 */
		subreq = tstream_npa_accept_existing_send(
			state,
			ev,
			state->plain,
			FILE_TYPE_MESSAGE_MODE_PIPE,
			0xff | 0x0400 | 0x0100,
			4096);
		if (tevent_req_nomem(subreq, req)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_set_callback(
			subreq, rpc_host_bind_read_got_npa, req);
		return req;
	}

	/* TCP/ncalrpc: create anonymous client info ourselves */
	status = rpc_host_generate_npa_info7_from_sock(
		state->client,
		transport,
		state->sock,
		peer_addr,
		&state->client->npa_info7);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_oom(req);
		return tevent_req_post(req, ev);
	}

	subreq = dcerpc_read_ncacn_packet_send(state, ev, state->plain);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
	return req;
}
995 :
996 1636 : static void rpc_host_bind_read_cleanup(
997 : struct tevent_req *req, enum tevent_req_state req_state)
998 : {
999 1636 : struct rpc_host_bind_read_state *state = tevent_req_data(
1000 : req, struct rpc_host_bind_read_state);
1001 :
1002 1636 : if ((req_state == TEVENT_REQ_RECEIVED) && (state->sock != -1)) {
1003 22 : close(state->sock);
1004 22 : state->sock = -1;
1005 : }
1006 1636 : }
1007 :
static void rpc_host_bind_read_got_npa(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	struct named_pipe_auth_req_info7 *info7 = NULL;
	int ret, err;

	ret = tstream_npa_accept_existing_recv(subreq,
					       &err,
					       state,
					       &state->npa_stream,
					       &info7,
					       NULL, /* transport */
					       NULL, /* remote_client_addr */
					       NULL, /* remote_client_name */
					       NULL, /* local_server_addr */
					       NULL, /* local_server_name */
					       NULL); /* session_info */
	if (ret == -1) {
		tevent_req_error(req, err);
		return;
	}

	/* Keep the client info smbd sent; the worker needs it */
	state->client->npa_info7 = talloc_move(state->client, &info7);

	/* Now read the client's bind packet over the npa stream */
	subreq = dcerpc_read_ncacn_packet_send(
		state, state->ev, state->npa_stream);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
}
1042 :
static void rpc_host_bind_read_got_bind(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	struct ncacn_packet *pkt = NULL;
	NTSTATUS status;

	/* Raw packet bytes land in client->bind_packet for the worker */
	status = dcerpc_read_ncacn_packet_recv(
		subreq,
		state->client,
		&pkt,
		&state->client->bind_packet);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dcerpc_read_ncacn_packet_recv failed: %s\n",
			  nt_errstr(status));
		/* NTSTATUS->errno mapping is lossy here */
		tevent_req_error(req, EINVAL); /* TODO */
		return;
	}
	state->pkt = talloc_move(state, &pkt);

	tevent_req_done(req);
}
1068 :
/*
 * Hand the accepted socket, the client metadata and the parsed bind
 * packet over to the caller. Returns 0 or a unix errno.
 */
static int rpc_host_bind_read_recv(
	struct tevent_req *req,
	TALLOC_CTX *mem_ctx,
	int *sock,
	struct rpc_host_client **client,
	struct ncacn_packet **bind_pkt)
{
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	int error;

	if (tevent_req_is_unix_error(req, &error)) {
		tevent_req_received(req);
		return error;
	}

	/* The caller owns the results from here on */
	*client = talloc_move(mem_ctx, &state->client);
	*bind_pkt = talloc_move(mem_ctx, &state->pkt);

	*sock = state->sock;
	state->sock = -1;

	tevent_req_received(req);
	return 0;
}
1093 :
1094 : /*
1095 : * Start the given rpcd_* binary.
1096 : */
1097 145 : static int rpc_host_exec_worker(struct rpc_server *server, size_t idx)
1098 : {
1099 145 : struct rpc_work_process *worker = &server->workers[idx];
1100 145 : char **argv = NULL;
1101 145 : int ret = ENOMEM;
1102 :
1103 145 : argv = str_list_make_empty(server);
1104 145 : str_list_add_printf(
1105 : &argv, "%s", server->rpc_server_exe);
1106 145 : str_list_add_printf(
1107 : &argv, "--configfile=%s", get_dyn_CONFIGFILE());
1108 145 : str_list_add_printf(
1109 : &argv, "--worker-group=%"PRIu32, server->server_index);
1110 145 : str_list_add_printf(
1111 : &argv, "--worker-index=%zu", idx);
1112 145 : str_list_add_printf(
1113 : &argv, "--debuglevel=%d", debuglevel_get_class(DBGC_RPC_SRV));
1114 145 : if (!is_default_dyn_LOGFILEBASE()) {
1115 135 : str_list_add_printf(
1116 : &argv, "--log-basename=%s", get_dyn_LOGFILEBASE());
1117 : }
1118 145 : if (argv == NULL) {
1119 0 : ret = ENOMEM;
1120 0 : goto fail;
1121 : }
1122 :
1123 145 : worker->pid = fork();
1124 287 : if (worker->pid == -1) {
1125 0 : ret = errno;
1126 0 : goto fail;
1127 : }
1128 287 : if (worker->pid == 0) {
1129 : /* Child. */
1130 146 : close(server->host->worker_stdin[1]);
1131 146 : ret = dup2(server->host->worker_stdin[0], 0);
1132 146 : if (ret != 0) {
1133 0 : exit(1);
1134 : }
1135 146 : execv(argv[0], argv);
1136 146 : _exit(1);
1137 : }
1138 :
1139 141 : DBG_DEBUG("Creating worker %s for index %zu: pid=%d\n",
1140 : server->rpc_server_exe,
1141 : idx,
1142 : (int)worker->pid);
1143 :
1144 141 : ret = 0;
1145 141 : fail:
1146 141 : TALLOC_FREE(argv);
1147 141 : return ret;
1148 : }
1149 :
1150 : /*
1151 : * Find an rpcd_* worker for an external client, respect server->max_workers
1152 : */
1153 620 : static struct rpc_work_process *rpc_host_find_worker(struct rpc_server *server)
1154 : {
1155 620 : struct rpc_work_process *worker = NULL;
1156 : size_t i;
1157 620 : size_t empty_slot = SIZE_MAX;
1158 :
1159 620 : uint32_t min_clients = UINT32_MAX;
1160 620 : size_t min_worker = server->max_workers;
1161 :
1162 2184 : for (i=0; i<server->max_workers; i++) {
1163 1564 : worker = &server->workers[i];
1164 :
1165 1564 : if (worker->pid == -1) {
1166 984 : empty_slot = MIN(empty_slot, i);
1167 984 : continue;
1168 : }
1169 580 : if (!worker->available) {
1170 0 : continue;
1171 : }
1172 580 : if (worker->num_clients < min_clients) {
1173 569 : min_clients = worker->num_clients;
1174 569 : min_worker = i;
1175 : }
1176 : }
1177 :
1178 620 : if (min_clients == 0) {
1179 558 : return &server->workers[min_worker];
1180 : }
1181 :
1182 62 : if (empty_slot < SIZE_MAX) {
1183 60 : int ret = rpc_host_exec_worker(server, empty_slot);
1184 60 : if (ret != 0) {
1185 0 : DBG_WARNING("Could not fork worker: %s\n",
1186 : strerror(ret));
1187 : }
1188 60 : return NULL;
1189 : }
1190 :
1191 2 : if (min_worker < server->max_workers) {
1192 2 : return &server->workers[min_worker];
1193 : }
1194 :
1195 0 : return NULL;
1196 : }
1197 :
1198 : /*
1199 : * Find an rpcd_* worker for an internal connection, possibly go beyond
1200 : * server->max_workers
1201 : */
1202 301 : static struct rpc_work_process *rpc_host_find_idle_worker(
1203 : struct rpc_server *server)
1204 : {
1205 301 : struct rpc_work_process *worker = NULL, *tmp = NULL;
1206 301 : size_t i, num_workers = talloc_array_length(server->workers);
1207 301 : size_t empty_slot = SIZE_MAX;
1208 : int ret;
1209 :
1210 489 : for (i=server->max_workers; i<num_workers; i++) {
1211 404 : worker = &server->workers[i];
1212 :
1213 404 : if (worker->pid == -1) {
1214 29 : empty_slot = MIN(empty_slot, i);
1215 29 : continue;
1216 : }
1217 375 : if (!worker->available) {
1218 0 : continue;
1219 : }
1220 375 : if (worker->num_clients == 0) {
1221 216 : return &server->workers[i];
1222 : }
1223 : }
1224 :
1225 85 : if (empty_slot < SIZE_MAX) {
1226 17 : ret = rpc_host_exec_worker(server, empty_slot);
1227 17 : if (ret != 0) {
1228 0 : DBG_WARNING("Could not fork worker: %s\n",
1229 : strerror(ret));
1230 : }
1231 17 : return NULL;
1232 : }
1233 :
1234 : /*
1235 : * All workers are busy. We need to expand the number of
1236 : * workers because we were asked for an idle worker.
1237 : */
1238 68 : if (num_workers+1 < num_workers) {
1239 0 : return NULL;
1240 : }
1241 68 : tmp = talloc_realloc(
1242 : server,
1243 : server->workers,
1244 : struct rpc_work_process,
1245 : num_workers+1);
1246 68 : if (tmp == NULL) {
1247 0 : return NULL;
1248 : }
1249 68 : server->workers = tmp;
1250 :
1251 68 : server->workers[num_workers] = (struct rpc_work_process) { .pid=-1, };
1252 :
1253 68 : ret = rpc_host_exec_worker(server, num_workers);
1254 64 : if (ret != 0) {
1255 0 : DBG_WARNING("Could not exec worker: %s\n", strerror(ret));
1256 : }
1257 :
1258 64 : return NULL;
1259 : }
1260 :
1261 : /*
1262 : * Find an rpcd_* process to talk to. Start a new one if necessary.
1263 : */
1264 1720 : static void rpc_host_distribute_clients(struct rpc_server *server)
1265 : {
1266 1720 : struct rpc_work_process *worker = NULL;
1267 1720 : struct rpc_host_pending_client *pending_client = NULL;
1268 : uint32_t assoc_group_id;
1269 : DATA_BLOB blob;
1270 : struct iovec iov;
1271 : enum ndr_err_code ndr_err;
1272 : NTSTATUS status;
1273 :
1274 1720 : again:
1275 1720 : pending_client = server->pending_clients;
1276 1720 : if (pending_client == NULL) {
1277 787 : DBG_DEBUG("No pending clients\n");
1278 1357 : return;
1279 : }
1280 :
1281 933 : assoc_group_id = pending_client->bind_pkt->u.bind.assoc_group_id;
1282 :
1283 933 : if (assoc_group_id != 0) {
1284 12 : size_t num_workers = talloc_array_length(server->workers);
1285 12 : uint8_t worker_index = assoc_group_id >> 24;
1286 :
1287 12 : if (worker_index >= num_workers) {
1288 0 : DBG_DEBUG("Invalid assoc group id %"PRIu32"\n",
1289 : assoc_group_id);
1290 0 : goto done;
1291 : }
1292 12 : worker = &server->workers[worker_index];
1293 :
1294 12 : if ((worker->pid == -1) || !worker->available) {
1295 0 : DBG_DEBUG("Requested worker index %"PRIu8": "
1296 : "pid=%d, available=%d",
1297 : worker_index,
1298 : (int)worker->pid,
1299 : (int)worker->available);
1300 : /*
1301 : * Pick a random one for a proper bind nack
1302 : */
1303 0 : worker = rpc_host_find_worker(server);
1304 : }
1305 : } else {
1306 921 : struct auth_session_info_transport *session_info =
1307 921 : pending_client->client->npa_info7->session_info;
1308 921 : uint32_t flags = 0;
1309 : bool found;
1310 :
1311 921 : found = security_token_find_npa_flags(
1312 921 : session_info->session_info->security_token,
1313 : &flags);
1314 :
1315 : /* fresh assoc group requested */
1316 921 : if (found & (flags & SAMBA_NPA_FLAGS_NEED_IDLE)) {
1317 301 : worker = rpc_host_find_idle_worker(server);
1318 : } else {
1319 620 : worker = rpc_host_find_worker(server);
1320 : }
1321 : }
1322 :
1323 929 : if (worker == NULL) {
1324 141 : DBG_DEBUG("No worker found\n");
1325 141 : return;
1326 : }
1327 :
1328 788 : DLIST_REMOVE(server->pending_clients, pending_client);
1329 :
1330 788 : ndr_err = ndr_push_struct_blob(
1331 : &blob,
1332 : pending_client,
1333 788 : pending_client->client,
1334 : (ndr_push_flags_fn_t)ndr_push_rpc_host_client);
1335 788 : if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1336 0 : DBG_WARNING("ndr_push_rpc_host_client failed: %s\n",
1337 : ndr_errstr(ndr_err));
1338 0 : goto done;
1339 : }
1340 :
1341 788 : DBG_INFO("Sending new client %s to %d with %"PRIu32" clients\n",
1342 : server->rpc_server_exe,
1343 : worker->pid,
1344 : worker->num_clients);
1345 :
1346 788 : iov = (struct iovec) {
1347 788 : .iov_base = blob.data, .iov_len = blob.length,
1348 : };
1349 :
1350 1576 : status = messaging_send_iov(
1351 788 : server->host->msg_ctx,
1352 : pid_to_procid(worker->pid),
1353 : MSG_RPC_HOST_NEW_CLIENT,
1354 : &iov,
1355 : 1,
1356 788 : &pending_client->sock,
1357 : 1);
1358 788 : if (NT_STATUS_EQUAL(status, NT_STATUS_OBJECT_NAME_NOT_FOUND)) {
1359 0 : DBG_DEBUG("worker %d died, sigchld not yet received?\n",
1360 : worker->pid);
1361 0 : DLIST_ADD(server->pending_clients, pending_client);
1362 0 : worker->available = false;
1363 0 : goto again;
1364 : }
1365 788 : if (!NT_STATUS_IS_OK(status)) {
1366 0 : DBG_DEBUG("messaging_send_iov failed: %s\n",
1367 : nt_errstr(status));
1368 0 : goto done;
1369 : }
1370 788 : worker->num_clients += 1;
1371 788 : TALLOC_FREE(worker->exit_timer);
1372 :
1373 804 : TALLOC_FREE(server->host->np_helper_shutdown);
1374 :
1375 1194 : done:
1376 788 : TALLOC_FREE(pending_client);
1377 : }
1378 :
1379 788 : static int rpc_host_pending_client_destructor(
1380 : struct rpc_host_pending_client *p)
1381 : {
1382 788 : TALLOC_FREE(p->hangup_wait);
1383 788 : if (p->sock != -1) {
1384 788 : close(p->sock);
1385 788 : p->sock = -1;
1386 : }
1387 788 : DLIST_REMOVE(p->server->pending_clients, p);
1388 788 : return 0;
1389 : }
1390 :
1391 : /*
1392 : * Exception condition handler before rpcd_* worker
1393 : * is handling the socket. Either the client exited or
1394 : * sent unexpected data after the initial bind.
1395 : */
1396 0 : static void rpc_host_client_exited(struct tevent_req *subreq)
1397 : {
1398 0 : struct rpc_host_pending_client *pending = tevent_req_callback_data(
1399 : subreq, struct rpc_host_pending_client);
1400 : bool ok;
1401 : int err;
1402 :
1403 0 : ok = wait_for_read_recv(subreq, &err);
1404 :
1405 0 : TALLOC_FREE(subreq);
1406 0 : pending->hangup_wait = NULL;
1407 :
1408 0 : if (ok) {
1409 0 : DBG_DEBUG("client on sock %d sent data\n", pending->sock);
1410 : } else {
1411 0 : DBG_DEBUG("client exited with %s\n", strerror(err));
1412 : }
1413 0 : TALLOC_FREE(pending);
1414 0 : }
1415 :
/*
 * Maps one interface syntax id to the list of binding strings
 * (endpoints) it is offered on.
 */
struct rpc_iface_binding_map {
	struct ndr_syntax_id iface;
	/* strv list of binding strings, stored as one tdb value below */
	char *bindings;
};
1420 :
/*
 * Merge one endpoint's binding string into the per-interface binding
 * maps for every interface the endpoint offers, creating new map
 * entries as needed. The management interface is skipped.
 */
static bool rpc_iface_binding_map_add_endpoint(
	TALLOC_CTX *mem_ctx,
	const struct rpc_host_endpoint *ep,
	struct rpc_host_iface_name *iface_names,
	struct rpc_iface_binding_map **pmaps)
{
	/* mgmt interface afa8bd80-7d8a-11c9-bef4-08002b102989 v1 */
	const struct ndr_syntax_id mgmt_iface = {
		{0xafa8bd80,
		 0x7d8a,
		 0x11c9,
		 {0xbe,0xf4},
		 {0x08,0x00,0x2b,0x10,0x29,0x89}
		},
		1.0};

	struct rpc_iface_binding_map *maps = *pmaps;
	size_t i, num_ifaces = talloc_array_length(ep->interfaces);
	char *binding_string = NULL;
	bool ok = false;

	binding_string = dcerpc_binding_string(mem_ctx, ep->binding);
	if (binding_string == NULL) {
		return false;
	}

	for (i=0; i<num_ifaces; i++) {
		const struct ndr_syntax_id *iface = &ep->interfaces[i];
		size_t j, num_maps = talloc_array_length(maps);
		struct rpc_iface_binding_map *map = NULL;
		char *p = NULL;

		if (ndr_syntax_id_equal(iface, &mgmt_iface)) {
			/*
			 * mgmt is offered everywhere, don't put it
			 * into epmdb.tdb.
			 */
			continue;
		}

		/* Look for an existing map entry for this interface */
		for (j=0; j<num_maps; j++) {
			map = &maps[j];
			if (ndr_syntax_id_equal(&map->iface, iface)) {
				break;
			}
		}

		if (j == num_maps) {
			/* Not found -- append a new map entry */
			struct rpc_iface_binding_map *tmp = NULL;
			struct rpc_host_iface_name *iface_name = NULL;

			iface_name = rpc_host_iface_names_find(
				iface_names, iface);
			if (iface_name == NULL) {
				goto fail;
			}

			tmp = talloc_realloc(
				mem_ctx,
				maps,
				struct rpc_iface_binding_map,
				num_maps+1);
			if (tmp == NULL) {
				goto fail;
			}
			maps = tmp;

			map = &maps[num_maps];
			/*
			 * NOTE(review): talloc_move() steals
			 * iface_name->name out of the iface_names
			 * list -- presumably each name is consumed
			 * exactly once here; confirm with callers.
			 */
			*map = (struct rpc_iface_binding_map) {
				.iface = *iface,
				.bindings = talloc_move(
					maps, &iface_name->name),
			};
		}

		/* Append the binding string unless already present */
		p = strv_find(map->bindings, binding_string);
		if (p == NULL) {
			int ret = strv_add(
				maps, &map->bindings, binding_string);
			if (ret != 0) {
				goto fail;
			}
		}
	}

	ok = true;
fail:
	/* Hand back the (possibly reallocated) array even on failure */
	*pmaps = maps;
	return ok;
}
1510 :
/*
 * Fold all endpoints of a server into the per-interface binding maps.
 */
static bool rpc_iface_binding_map_add_endpoints(
	TALLOC_CTX *mem_ctx,
	struct rpc_host_endpoint **endpoints,
	struct rpc_host_iface_name *iface_names,
	struct rpc_iface_binding_map **pbinding_maps)
{
	size_t num_endpoints = talloc_array_length(endpoints);
	size_t i;

	for (i=0; i<num_endpoints; i++) {
		if (!rpc_iface_binding_map_add_endpoint(
			    mem_ctx,
			    endpoints[i],
			    iface_names,
			    pbinding_maps)) {
			return false;
		}
	}
	return true;
}
1528 :
/*
 * Store the binding strings per interface into epmdb.tdb, keyed by
 * the interface's syntax id string.
 */
static bool rpc_host_fill_epm_db(
	struct tdb_wrap *db,
	struct rpc_host_endpoint **endpoints,
	struct rpc_host_iface_name *iface_names)
{
	struct rpc_iface_binding_map *maps = NULL;
	size_t j, num_maps;
	bool result = false;

	if (!rpc_iface_binding_map_add_endpoints(
		    talloc_tos(), endpoints, iface_names, &maps)) {
		goto out;
	}

	num_maps = talloc_array_length(maps);

	for (j=0; j<num_maps; j++) {
		struct ndr_syntax_id_buf idbuf;
		char *key = NULL;
		TDB_DATA val;
		int rc;

		key = ndr_syntax_id_buf_string(&maps[j].iface, &idbuf);

		/* The whole strv (including NUL separators) is the value */
		val = (TDB_DATA) {
			.dptr = (uint8_t *)maps[j].bindings,
			.dsize = talloc_array_length(maps[j].bindings),
		};

		rc = tdb_store(
			db->tdb, string_term_tdb_data(key), val, 0);
		if (rc == -1) {
			DBG_DEBUG("tdb_store() failed: %s\n",
				  tdb_errorstr(db->tdb));
			goto out;
		}
	}

	result = true;
out:
	TALLOC_FREE(maps);
	return result;
}
1571 :
/* Async state for setting up one rpc_server (one rpcd_* binary) */
struct rpc_server_setup_state {
	/* The server being initialized; handed out in _recv() */
	struct rpc_server *server;
};

static void rpc_server_setup_got_endpoints(struct tevent_req *subreq);
1577 :
1578 : /*
1579 : * Async initialize state for all possible rpcd_* servers.
1580 : * Note this does not start them.
1581 : */
1582 272 : static struct tevent_req *rpc_server_setup_send(
1583 : TALLOC_CTX *mem_ctx,
1584 : struct tevent_context *ev,
1585 : struct rpc_host *host,
1586 : const char *rpc_server_exe)
1587 : {
1588 272 : struct tevent_req *req = NULL, *subreq = NULL;
1589 272 : struct rpc_server_setup_state *state = NULL;
1590 272 : struct rpc_server *server = NULL;
1591 :
1592 272 : req = tevent_req_create(
1593 : mem_ctx, &state, struct rpc_server_setup_state);
1594 272 : if (req == NULL) {
1595 0 : return NULL;
1596 : }
1597 272 : state->server = talloc_zero(state, struct rpc_server);
1598 272 : if (tevent_req_nomem(state->server, req)) {
1599 0 : return tevent_req_post(req, ev);
1600 : }
1601 :
1602 272 : server = state->server;
1603 :
1604 272 : *server = (struct rpc_server) {
1605 : .host = host,
1606 : .server_index = UINT32_MAX,
1607 272 : .rpc_server_exe = talloc_strdup(server, rpc_server_exe),
1608 : };
1609 272 : if (tevent_req_nomem(server->rpc_server_exe, req)) {
1610 0 : return tevent_req_post(req, ev);
1611 : }
1612 :
1613 272 : subreq = rpc_server_get_endpoints_send(
1614 : state,
1615 : ev,
1616 : rpc_server_exe,
1617 272 : host->np_helper ? NCACN_NP : NCA_UNKNOWN);
1618 272 : if (tevent_req_nomem(subreq, req)) {
1619 0 : return tevent_req_post(req, ev);
1620 : }
1621 272 : tevent_req_set_callback(subreq, rpc_server_setup_got_endpoints, req);
1622 272 : return req;
1623 : }
1624 :
/*
 * The rpcd_* binary reported its endpoints (via --list-interfaces).
 * Create and listen on the sockets and publish the endpoints in
 * epmdb.tdb.
 */
static void rpc_server_setup_got_endpoints(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_server_setup_state *state = tevent_req_data(
		req, struct rpc_server_setup_state);
	struct rpc_server *server = state->server;
	int ret;
	size_t i, num_endpoints;
	bool ok;

	ret = rpc_server_get_endpoints_recv(
		subreq,
		server,
		&server->endpoints,
		&server->iface_names,
		&server->max_workers,
		&server->idle_seconds);
	TALLOC_FREE(subreq);
	if (ret != 0) {
		tevent_req_nterror(req, map_nt_error_from_unix(ret));
		return;
	}

	server->workers = talloc_array(
		server, struct rpc_work_process, server->max_workers);
	if (tevent_req_nomem(server->workers, req)) {
		return;
	}

	for (i=0; i<server->max_workers; i++) {
		/* mark as not yet created */
		server->workers[i] = (struct rpc_work_process) { .pid=-1, };
	}

	num_endpoints = talloc_array_length(server->endpoints);

	for (i=0; i<num_endpoints; i++) {
		struct rpc_host_endpoint *e = server->endpoints[i];
		NTSTATUS status;
		size_t j;

		e->server = server;

		status = dcesrv_create_binding_sockets(
			e->binding, e, &e->num_fds, &e->fds);
		if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_SUPPORTED)) {
			/* Unsupported transport, skip this endpoint */
			continue;
		}
		/* tevent_req_nterror() is a no-op for NT_STATUS_OK */
		if (tevent_req_nterror(req, status)) {
			DBG_DEBUG("dcesrv_create_binding_sockets failed: %s\n",
				  nt_errstr(status));
			return;
		}

		for (j=0; j<e->num_fds; j++) {
			ret = listen(e->fds[j], 256);
			if (ret == -1) {
				tevent_req_nterror(
					req, map_nt_error_from_unix(errno));
				return;
			}
		}
	}

	/* Failure to publish is logged but not fatal */
	ok = rpc_host_fill_epm_db(
		server->host->epmdb, server->endpoints, server->iface_names);
	if (!ok) {
		DBG_DEBUG("rpc_host_fill_epm_db failed\n");
	}

	tevent_req_done(req);
}
1698 :
/*
 * Hand the fully set-up rpc_server over to the caller.
 */
static NTSTATUS rpc_server_setup_recv(
	struct tevent_req *req, TALLOC_CTX *mem_ctx, struct rpc_server **server)
{
	struct rpc_server_setup_state *state = tevent_req_data(
		req, struct rpc_server_setup_state);
	NTSTATUS status = NT_STATUS_OK;

	if (!tevent_req_is_nterror(req, &status)) {
		*server = talloc_move(mem_ctx, &state->server);
	}

	tevent_req_received(req);
	return status;
}
1715 :
1716 : /*
1717 : * rpcd_* died. Called from SIGCHLD handler.
1718 : */
1719 164 : static void rpc_worker_exited(struct rpc_host *host, pid_t pid)
1720 : {
1721 164 : size_t i, num_servers = talloc_array_length(host->servers);
1722 164 : struct rpc_work_process *worker = NULL;
1723 164 : bool found_pid = false;
1724 164 : bool have_active_worker = false;
1725 :
1726 1476 : for (i=0; i<num_servers; i++) {
1727 1312 : struct rpc_server *server = host->servers[i];
1728 : size_t j, num_workers;
1729 :
1730 1312 : if (server == NULL) {
1731 : /* SIGCHLD for --list-interfaces run */
1732 390 : continue;
1733 : }
1734 :
1735 922 : num_workers = talloc_array_length(server->workers);
1736 :
1737 4926 : for (j=0; j<num_workers; j++) {
1738 4004 : worker = &server->workers[j];
1739 4004 : if (worker->pid == pid) {
1740 105 : found_pid = true;
1741 105 : worker->pid = -1;
1742 105 : worker->available = false;
1743 : }
1744 :
1745 4004 : if (worker->pid != -1) {
1746 215 : have_active_worker = true;
1747 : }
1748 : }
1749 : }
1750 :
1751 164 : if (!found_pid) {
1752 59 : DBG_WARNING("No worker with PID %d\n", (int)pid);
1753 59 : return;
1754 : }
1755 :
1756 105 : if (!have_active_worker && host->np_helper) {
1757 : /*
1758 : * We have nothing left to do as an np_helper.
1759 : * Terminate ourselves (samba-dcerpcd). We will
1760 : * be restarted on demand anyway.
1761 : */
1762 22 : DBG_DEBUG("Exiting idle np helper\n");
1763 22 : exit(0);
1764 : }
1765 : }
1766 :
1767 : /*
1768 : * rpcd_* died.
1769 : */
1770 339 : static void rpc_host_sigchld(
1771 : struct tevent_context *ev,
1772 : struct tevent_signal *se,
1773 : int signum,
1774 : int count,
1775 : void *siginfo,
1776 : void *private_data)
1777 : {
1778 339 : struct rpc_host *state = talloc_get_type_abort(
1779 : private_data, struct rpc_host);
1780 : pid_t pid;
1781 : int wstatus;
1782 :
1783 706 : while ((pid = waitpid(-1, &wstatus, WNOHANG)) > 0) {
1784 164 : DBG_DEBUG("pid=%d, wstatus=%d\n", (int)pid, wstatus);
1785 164 : rpc_worker_exited(state, pid);
1786 : }
1787 317 : }
1788 :
1789 : /*
1790 : * Idle timer fired for a rcpd_* worker. Ask it to terminate.
1791 : */
1792 105 : static void rpc_host_exit_worker(
1793 : struct tevent_context *ev,
1794 : struct tevent_timer *te,
1795 : struct timeval current_time,
1796 : void *private_data)
1797 : {
1798 105 : struct rpc_server *server = talloc_get_type_abort(
1799 : private_data, struct rpc_server);
1800 105 : size_t i, num_workers = talloc_array_length(server->workers);
1801 :
1802 : /*
1803 : * Scan for the right worker. We don't have too many of those,
1804 : * and maintaining an index would be more data structure effort.
1805 : */
1806 :
1807 678 : for (i=0; i<num_workers; i++) {
1808 440 : struct rpc_work_process *w = &server->workers[i];
1809 : NTSTATUS status;
1810 :
1811 440 : if (w->exit_timer != te) {
1812 335 : continue;
1813 : }
1814 105 : w->exit_timer = NULL;
1815 :
1816 105 : SMB_ASSERT(w->num_clients == 0);
1817 :
1818 164 : status = messaging_send(
1819 105 : server->host->msg_ctx,
1820 : pid_to_procid(w->pid),
1821 : MSG_SHUTDOWN,
1822 : NULL);
1823 105 : if (!NT_STATUS_IS_OK(status)) {
1824 0 : DBG_DEBUG("Could not send SHUTDOWN msg: %s\n",
1825 : nt_errstr(status));
1826 : }
1827 :
1828 105 : w->available = false;
1829 105 : break;
1830 : }
1831 105 : }
1832 :
1833 : /*
1834 : * rcpd_* worker replied with its status.
1835 : */
1836 924 : static void rpc_host_child_status_recv(
1837 : struct messaging_context *msg,
1838 : void *private_data,
1839 : uint32_t msg_type,
1840 : struct server_id server_id,
1841 : DATA_BLOB *data)
1842 : {
1843 924 : struct rpc_host *host = talloc_get_type_abort(
1844 : private_data, struct rpc_host);
1845 924 : size_t num_servers = talloc_array_length(host->servers);
1846 924 : struct rpc_server *server = NULL;
1847 : size_t num_workers;
1848 924 : pid_t src_pid = procid_to_pid(&server_id);
1849 924 : struct rpc_work_process *worker = NULL;
1850 : struct rpc_worker_status status_message;
1851 : enum ndr_err_code ndr_err;
1852 :
1853 924 : ndr_err = ndr_pull_struct_blob_all_noalloc(
1854 : data,
1855 : &status_message,
1856 : (ndr_pull_flags_fn_t)ndr_pull_rpc_worker_status);
1857 924 : if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1858 : struct server_id_buf buf;
1859 0 : DBG_WARNING("Got invalid message from pid %s\n",
1860 : server_id_str_buf(server_id, &buf));
1861 0 : return;
1862 : }
1863 924 : if (DEBUGLEVEL >= 10) {
1864 0 : NDR_PRINT_DEBUG(rpc_worker_status, &status_message);
1865 : }
1866 :
1867 924 : if (status_message.server_index >= num_servers) {
1868 0 : DBG_WARNING("Got invalid server_index=%"PRIu32", "
1869 : "num_servers=%zu\n",
1870 : status_message.server_index,
1871 : num_servers);
1872 0 : return;
1873 : }
1874 :
1875 924 : server = host->servers[status_message.server_index];
1876 :
1877 924 : num_workers = talloc_array_length(server->workers);
1878 924 : if (status_message.worker_index >= num_workers) {
1879 0 : DBG_WARNING("Got invalid worker_index=%"PRIu32", "
1880 : "num_workers=%zu\n",
1881 : status_message.worker_index,
1882 : num_workers);
1883 0 : return;
1884 : }
1885 924 : worker = &server->workers[status_message.worker_index];
1886 :
1887 924 : if (src_pid != worker->pid) {
1888 0 : DBG_WARNING("Got idx=%"PRIu32" from %d, expected %d\n",
1889 : status_message.worker_index,
1890 : (int)src_pid,
1891 : worker->pid);
1892 0 : return;
1893 : }
1894 :
1895 924 : worker->available = true;
1896 924 : worker->num_clients = status_message.num_clients;
1897 :
1898 924 : if (worker->num_clients != 0) {
1899 8 : TALLOC_FREE(worker->exit_timer);
1900 : } else {
1901 916 : worker->exit_timer = tevent_add_timer(
1902 : messaging_tevent_context(msg),
1903 : server->workers,
1904 : tevent_timeval_current_ofs(server->idle_seconds, 0),
1905 : rpc_host_exit_worker,
1906 : server);
1907 : /* No NULL check, it's not fatal if this does not work */
1908 : }
1909 :
1910 924 : rpc_host_distribute_clients(server);
1911 : }
1912 :
1913 : /*
1914 : * samba-dcerpcd has been asked to shutdown.
1915 : * Mark the initial tevent_req as done so we
1916 : * exit the event loop.
1917 : */
1918 0 : static void rpc_host_msg_shutdown(
1919 : struct messaging_context *msg,
1920 : void *private_data,
1921 : uint32_t msg_type,
1922 : struct server_id server_id,
1923 : DATA_BLOB *data)
1924 : {
1925 0 : struct tevent_req *req = talloc_get_type_abort(
1926 : private_data, struct tevent_req);
1927 0 : tevent_req_done(req);
1928 0 : }
1929 :
1930 : /*
1931 : * Only match directory entries starting in rpcd_
1932 : */
1933 8012 : static int rpcd_filter(const struct dirent *d)
1934 : {
1935 8012 : int match = fnmatch("rpcd_*", d->d_name, 0);
1936 8012 : return (match == 0) ? 1 : 0;
1937 : }
1938 :
1939 : /*
1940 : * Scan the given libexecdir for rpcd_* services
1941 : * and return them as a strv list.
1942 : */
1943 29 : static int rpc_host_list_servers(
1944 : const char *libexecdir, TALLOC_CTX *mem_ctx, char **pservers)
1945 : {
1946 29 : char *servers = NULL;
1947 29 : struct dirent **namelist = NULL;
1948 : int i, num_servers;
1949 29 : int ret = ENOMEM;
1950 :
1951 29 : num_servers = scandir(libexecdir, &namelist, rpcd_filter, alphasort);
1952 29 : if (num_servers == -1) {
1953 0 : DBG_DEBUG("scandir failed: %s\n", strerror(errno));
1954 0 : return errno;
1955 : }
1956 :
1957 261 : for (i=0; i<num_servers; i++) {
1958 232 : char *exe = talloc_asprintf(
1959 232 : mem_ctx, "%s/%s", libexecdir, namelist[i]->d_name);
1960 232 : if (exe == NULL) {
1961 0 : goto fail;
1962 : }
1963 :
1964 232 : ret = strv_add(mem_ctx, &servers, exe);
1965 232 : TALLOC_FREE(exe);
1966 232 : if (ret != 0) {
1967 0 : goto fail;
1968 : }
1969 : }
1970 29 : fail:
1971 261 : for (i=0; i<num_servers; i++) {
1972 232 : SAFE_FREE(namelist[i]);
1973 : }
1974 29 : SAFE_FREE(namelist);
1975 :
1976 29 : if (ret != 0) {
1977 0 : TALLOC_FREE(servers);
1978 0 : return ret;
1979 : }
1980 29 : *pservers = servers;
1981 29 : return 0;
1982 : }
1983 :
/* Async state for accepting clients on one endpoint's listen fds */
struct rpc_host_endpoint_accept_state {
	struct tevent_context *ev;
	/* The endpoint whose sockets we accept on; not owned here */
	struct rpc_host_endpoint *endpoint;
};

static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq);
static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq);
1991 :
1992 : /*
1993 : * Asynchronously wait for a DCERPC connection from a client.
1994 : */
1995 656 : static struct tevent_req *rpc_host_endpoint_accept_send(
1996 : TALLOC_CTX *mem_ctx,
1997 : struct tevent_context *ev,
1998 : struct rpc_host_endpoint *endpoint)
1999 : {
2000 656 : struct tevent_req *req = NULL;
2001 656 : struct rpc_host_endpoint_accept_state *state = NULL;
2002 : size_t i;
2003 :
2004 656 : req = tevent_req_create(
2005 : mem_ctx, &state, struct rpc_host_endpoint_accept_state);
2006 656 : if (req == NULL) {
2007 0 : return NULL;
2008 : }
2009 656 : state->ev = ev;
2010 656 : state->endpoint = endpoint;
2011 :
2012 1344 : for (i=0; i<endpoint->num_fds; i++) {
2013 688 : struct tevent_req *subreq = NULL;
2014 :
2015 688 : subreq = accept_send(state, ev, endpoint->fds[i]);
2016 688 : if (tevent_req_nomem(subreq, req)) {
2017 0 : return tevent_req_post(req, ev);
2018 : }
2019 688 : tevent_req_set_callback(
2020 : subreq, rpc_host_endpoint_accept_accepted, req);
2021 : }
2022 :
2023 656 : return req;
2024 : }
2025 :
2026 : /*
2027 : * Accept a DCERPC connection from a client.
2028 : */
2029 818 : static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq)
2030 : {
2031 818 : struct tevent_req *req = tevent_req_callback_data(
2032 : subreq, struct tevent_req);
2033 818 : struct rpc_host_endpoint_accept_state *state = tevent_req_data(
2034 : req, struct rpc_host_endpoint_accept_state);
2035 818 : struct rpc_host_endpoint *endpoint = state->endpoint;
2036 : int sock, listen_sock, err;
2037 : struct samba_sockaddr peer_addr;
2038 :
2039 818 : sock = accept_recv(subreq, &listen_sock, &peer_addr, &err);
2040 818 : TALLOC_FREE(subreq);
2041 818 : if (sock == -1) {
2042 : /* What to do here? Just ignore the error and retry? */
2043 0 : DBG_DEBUG("accept_recv failed: %s\n", strerror(err));
2044 0 : tevent_req_error(req, err);
2045 0 : return;
2046 : }
2047 :
2048 818 : subreq = accept_send(state, state->ev, listen_sock);
2049 818 : if (tevent_req_nomem(subreq, req)) {
2050 0 : close(sock);
2051 0 : sock = -1;
2052 0 : return;
2053 : }
2054 818 : tevent_req_set_callback(
2055 : subreq, rpc_host_endpoint_accept_accepted, req);
2056 :
2057 818 : subreq = rpc_host_bind_read_send(
2058 : state,
2059 : state->ev,
2060 818 : dcerpc_binding_get_transport(endpoint->binding),
2061 : &sock,
2062 : &peer_addr);
2063 818 : if (tevent_req_nomem(subreq, req)) {
2064 0 : return;
2065 : }
2066 818 : tevent_req_set_callback(
2067 : subreq, rpc_host_endpoint_accept_got_bind, req);
2068 : }
2069 :
2070 : /*
2071 : * Client sent us a DCERPC bind packet.
2072 : */
2073 818 : static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq)
2074 : {
2075 818 : struct tevent_req *req = tevent_req_callback_data(
2076 : subreq, struct tevent_req);
2077 818 : struct rpc_host_endpoint_accept_state *state = tevent_req_data(
2078 : req, struct rpc_host_endpoint_accept_state);
2079 818 : struct rpc_host_endpoint *endpoint = state->endpoint;
2080 818 : struct rpc_server *server = endpoint->server;
2081 818 : struct rpc_host_pending_client *pending = NULL;
2082 818 : struct rpc_host_client *client = NULL;
2083 818 : struct ncacn_packet *bind_pkt = NULL;
2084 : int ret;
2085 818 : int sock=-1;
2086 :
2087 818 : ret = rpc_host_bind_read_recv(
2088 : subreq, state, &sock, &client, &bind_pkt);
2089 818 : TALLOC_FREE(subreq);
2090 818 : if (ret != 0) {
2091 22 : DBG_DEBUG("rpc_host_bind_read_recv returned %s\n",
2092 : strerror(ret));
2093 22 : goto fail;
2094 : }
2095 :
2096 796 : client->binding = dcerpc_binding_string(client, endpoint->binding);
2097 796 : if (client->binding == NULL) {
2098 0 : DBG_WARNING("dcerpc_binding_string failed, dropping client\n");
2099 0 : goto fail;
2100 : }
2101 :
2102 796 : pending = talloc_zero(server, struct rpc_host_pending_client);
2103 796 : if (pending == NULL) {
2104 0 : DBG_WARNING("talloc failed, dropping client\n");
2105 0 : goto fail;
2106 : }
2107 796 : pending->server = server;
2108 796 : pending->sock = sock;
2109 796 : pending->bind_pkt = talloc_move(pending, &bind_pkt);
2110 796 : pending->client = talloc_move(pending, &client);
2111 796 : talloc_set_destructor(pending, rpc_host_pending_client_destructor);
2112 796 : sock = -1;
2113 :
2114 796 : pending->hangup_wait = wait_for_read_send(
2115 : pending, state->ev, pending->sock, true);
2116 796 : if (pending->hangup_wait == NULL) {
2117 0 : DBG_WARNING("wait_for_read_send failed, dropping client\n");
2118 0 : TALLOC_FREE(pending);
2119 792 : return;
2120 : }
2121 796 : tevent_req_set_callback(
2122 : pending->hangup_wait, rpc_host_client_exited, pending);
2123 :
2124 796 : DLIST_ADD_END(server->pending_clients, pending);
2125 796 : rpc_host_distribute_clients(server);
2126 792 : return;
2127 :
2128 22 : fail:
2129 22 : TALLOC_FREE(client);
2130 22 : if (sock != -1) {
2131 0 : close(sock);
2132 : }
2133 : }
2134 :
/*
 * Report which endpoint this accept loop was serving plus the final
 * errno-style result. NOTE: *ep must be read out *before*
 * tevent_req_simple_recv_unix() runs, as that call ends with
 * tevent_req_received() which invalidates the request's private
 * state -- do not reorder.
 */
static int rpc_host_endpoint_accept_recv(
	struct tevent_req *req, struct rpc_host_endpoint **ep)
{
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);

	*ep = state->endpoint;

	return tevent_req_simple_recv_unix(req);
}
2145 :
2146 : /*
2147 : * Full state for samba-dcerpcd. Everything else
2148 : * is hung off this.
2149 : */
2150 : struct rpc_host_state {
2151 : struct tevent_context *ev;
2152 : struct rpc_host *host;
2153 :
2154 : bool is_ready;
2155 : const char *daemon_ready_progname;
2156 : struct tevent_immediate *ready_signal_immediate;
2157 : int *ready_signal_fds;
2158 :
2159 : size_t num_servers;
2160 : size_t num_prepared;
2161 : };
2162 :
2163 : /*
2164 : * Tell whoever invoked samba-dcerpcd we're ready to
2165 : * serve.
2166 : */
2167 37 : static void rpc_host_report_readiness(
2168 : struct tevent_context *ev,
2169 : struct tevent_immediate *im,
2170 : void *private_data)
2171 : {
2172 37 : struct rpc_host_state *state = talloc_get_type_abort(
2173 : private_data, struct rpc_host_state);
2174 37 : int i, num_fds = talloc_array_length(state->ready_signal_fds);
2175 :
2176 37 : if (!state->is_ready) {
2177 0 : DBG_DEBUG("Not yet ready\n");
2178 0 : return;
2179 : }
2180 :
2181 66 : for (i=0; i<num_fds; i++) {
2182 29 : uint8_t byte = 0;
2183 : ssize_t nwritten;
2184 :
2185 : do {
2186 29 : nwritten = write(
2187 29 : state->ready_signal_fds[i],
2188 : (void *)&byte,
2189 : sizeof(byte));
2190 29 : } while ((nwritten == -1) && (errno == EINTR));
2191 :
2192 29 : close(state->ready_signal_fds[i]);
2193 : }
2194 :
2195 37 : TALLOC_FREE(state->ready_signal_fds);
2196 : }
2197 :
2198 : /*
2199 : * Respond to a "are you ready" message.
2200 : */
2201 109 : static bool rpc_host_ready_signal_filter(
2202 : struct messaging_rec *rec, void *private_data)
2203 : {
2204 109 : struct rpc_host_state *state = talloc_get_type_abort(
2205 : private_data, struct rpc_host_state);
2206 109 : size_t num_fds = talloc_array_length(state->ready_signal_fds);
2207 109 : int *tmp = NULL;
2208 :
2209 109 : if (rec->msg_type != MSG_DAEMON_READY_FD) {
2210 106 : return false;
2211 : }
2212 3 : if (rec->num_fds != 1) {
2213 0 : DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
2214 0 : return false;
2215 : }
2216 :
2217 3 : if (num_fds + 1 < num_fds) {
2218 0 : return false;
2219 : }
2220 3 : tmp = talloc_realloc(state, state->ready_signal_fds, int, num_fds+1);
2221 3 : if (tmp == NULL) {
2222 0 : return false;
2223 : }
2224 3 : state->ready_signal_fds = tmp;
2225 :
2226 3 : state->ready_signal_fds[num_fds] = rec->fds[0];
2227 3 : rec->fds[0] = -1;
2228 :
2229 3 : tevent_schedule_immediate(
2230 : state->ready_signal_immediate,
2231 : state->ev,
2232 : rpc_host_report_readiness,
2233 : state);
2234 :
2235 3 : return false;
2236 : }
2237 :
2238 : /*
2239 : * Respond to a "what is your status" message.
2240 : */
2241 109 : static bool rpc_host_dump_status_filter(
2242 : struct messaging_rec *rec, void *private_data)
2243 : {
2244 109 : struct rpc_host_state *state = talloc_get_type_abort(
2245 : private_data, struct rpc_host_state);
2246 109 : struct rpc_host *host = state->host;
2247 109 : struct rpc_server **servers = host->servers;
2248 109 : size_t i, num_servers = talloc_array_length(servers);
2249 109 : FILE *f = NULL;
2250 : int fd;
2251 :
2252 109 : if (rec->msg_type != MSG_RPC_DUMP_STATUS) {
2253 109 : return false;
2254 : }
2255 0 : if (rec->num_fds != 1) {
2256 0 : DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
2257 0 : return false;
2258 : }
2259 :
2260 0 : fd = dup(rec->fds[0]);
2261 0 : if (fd == -1) {
2262 0 : DBG_DEBUG("dup(%"PRIi64") failed: %s\n",
2263 : rec->fds[0],
2264 : strerror(errno));
2265 0 : return false;
2266 : }
2267 :
2268 0 : f = fdopen(fd, "w");
2269 0 : if (f == NULL) {
2270 0 : DBG_DEBUG("fdopen failed: %s\n", strerror(errno));
2271 0 : close(fd);
2272 0 : return false;
2273 : }
2274 :
2275 0 : for (i=0; i<num_servers; i++) {
2276 0 : struct rpc_server *server = servers[i];
2277 0 : size_t j, num_workers = talloc_array_length(server->workers);
2278 0 : size_t active_workers = 0;
2279 :
2280 0 : for (j=0; j<num_workers; j++) {
2281 0 : if (server->workers[j].pid != -1) {
2282 0 : active_workers += 1;
2283 : }
2284 : }
2285 :
2286 0 : fprintf(f,
2287 : "%s: active_workers=%zu\n",
2288 : server->rpc_server_exe,
2289 : active_workers);
2290 :
2291 0 : for (j=0; j<num_workers; j++) {
2292 0 : struct rpc_work_process *w = &server->workers[j];
2293 :
2294 0 : if (w->pid == (pid_t)-1) {
2295 0 : continue;
2296 : }
2297 :
2298 0 : fprintf(f,
2299 : " worker[%zu]: pid=%d, num_clients=%"PRIu32"\n",
2300 : j,
2301 0 : (int)w->pid,
2302 : w->num_clients);
2303 : }
2304 : }
2305 :
2306 0 : fclose(f);
2307 :
2308 0 : return false;
2309 : }
2310 :
2311 : static void rpc_host_server_setup_done(struct tevent_req *subreq);
2312 : static void rpc_host_endpoint_failed(struct tevent_req *subreq);
2313 :
2314 : /*
2315 : * Async startup for samba-dcerpcd.
2316 : */
2317 34 : static struct tevent_req *rpc_host_send(
2318 : TALLOC_CTX *mem_ctx,
2319 : struct tevent_context *ev,
2320 : struct messaging_context *msg_ctx,
2321 : char *servers,
2322 : int ready_signal_fd,
2323 : const char *daemon_ready_progname,
2324 : bool is_np_helper)
2325 : {
2326 34 : struct tevent_req *req = NULL, *subreq = NULL;
2327 34 : struct rpc_host_state *state = NULL;
2328 34 : struct rpc_host *host = NULL;
2329 34 : struct tevent_signal *se = NULL;
2330 34 : char *epmdb_path = NULL;
2331 34 : char *exe = NULL;
2332 34 : size_t i, num_servers = strv_count(servers);
2333 : NTSTATUS status;
2334 : int ret;
2335 :
2336 34 : req = tevent_req_create(req, &state, struct rpc_host_state);
2337 34 : if (req == NULL) {
2338 0 : return NULL;
2339 : }
2340 34 : state->ev = ev;
2341 34 : state->daemon_ready_progname = daemon_ready_progname;
2342 :
2343 34 : state->ready_signal_immediate = tevent_create_immediate(state);
2344 34 : if (tevent_req_nomem(state->ready_signal_immediate, req)) {
2345 0 : return tevent_req_post(req, ev);
2346 : }
2347 :
2348 34 : if (ready_signal_fd != -1) {
2349 26 : state->ready_signal_fds = talloc_array(state, int, 1);
2350 26 : if (tevent_req_nomem(state->ready_signal_fds, req)) {
2351 0 : return tevent_req_post(req, ev);
2352 : }
2353 26 : state->ready_signal_fds[0] = ready_signal_fd;
2354 : }
2355 :
2356 34 : state->host = talloc_zero(state, struct rpc_host);
2357 34 : if (tevent_req_nomem(state->host, req)) {
2358 0 : return tevent_req_post(req, ev);
2359 : }
2360 34 : host = state->host;
2361 :
2362 34 : host->msg_ctx = msg_ctx;
2363 34 : host->np_helper = is_np_helper;
2364 :
2365 34 : ret = pipe(host->worker_stdin);
2366 34 : if (ret == -1) {
2367 0 : tevent_req_nterror(req, map_nt_error_from_unix(errno));
2368 0 : return tevent_req_post(req, ev);
2369 : }
2370 :
2371 34 : host->servers = talloc_zero_array(
2372 : host, struct rpc_server *, num_servers);
2373 34 : if (tevent_req_nomem(host->servers, req)) {
2374 0 : return tevent_req_post(req, ev);
2375 : }
2376 :
2377 34 : se = tevent_add_signal(ev, state, SIGCHLD, 0, rpc_host_sigchld, host);
2378 34 : if (tevent_req_nomem(se, req)) {
2379 0 : return tevent_req_post(req, ev);
2380 : }
2381 34 : BlockSignals(false, SIGCHLD);
2382 :
2383 34 : status = messaging_register(
2384 : msg_ctx,
2385 : host,
2386 : MSG_RPC_WORKER_STATUS,
2387 : rpc_host_child_status_recv);
2388 34 : if (tevent_req_nterror(req, status)) {
2389 0 : return tevent_req_post(req, ev);
2390 : }
2391 :
2392 34 : status = messaging_register(
2393 : msg_ctx, req, MSG_SHUTDOWN, rpc_host_msg_shutdown);
2394 34 : if (tevent_req_nterror(req, status)) {
2395 0 : return tevent_req_post(req, ev);
2396 : }
2397 :
2398 34 : subreq = messaging_filtered_read_send(
2399 : state, ev, msg_ctx, rpc_host_ready_signal_filter, state);
2400 34 : if (tevent_req_nomem(subreq, req)) {
2401 0 : return tevent_req_post(req, ev);
2402 : }
2403 :
2404 34 : subreq = messaging_filtered_read_send(
2405 : state, ev, msg_ctx, rpc_host_dump_status_filter, state);
2406 34 : if (tevent_req_nomem(subreq, req)) {
2407 0 : return tevent_req_post(req, ev);
2408 : }
2409 :
2410 34 : epmdb_path = lock_path(state, "epmdb.tdb");
2411 34 : if (tevent_req_nomem(epmdb_path, req)) {
2412 0 : return tevent_req_post(req, ev);
2413 : }
2414 :
2415 34 : host->epmdb = tdb_wrap_open(
2416 : host,
2417 : epmdb_path,
2418 : 0,
2419 : TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
2420 : O_RDWR|O_CREAT,
2421 : 0644);
2422 34 : if (host->epmdb == NULL) {
2423 0 : DBG_DEBUG("tdb_wrap_open(%s) failed: %s\n",
2424 : epmdb_path,
2425 : strerror(errno));
2426 0 : tevent_req_nterror(req, map_nt_error_from_unix(errno));
2427 0 : return tevent_req_post(req, ev);
2428 : }
2429 34 : TALLOC_FREE(epmdb_path);
2430 :
2431 241 : for (exe = strv_next(servers, exe), i = 0;
2432 99 : exe != NULL;
2433 272 : exe = strv_next(servers, exe), i++) {
2434 :
2435 272 : DBG_DEBUG("server_setup for %s index %zu\n", exe, i);
2436 :
2437 272 : subreq = rpc_server_setup_send(
2438 : state,
2439 : ev,
2440 : host,
2441 : exe);
2442 272 : if (tevent_req_nomem(subreq, req)) {
2443 0 : return tevent_req_post(req, ev);
2444 : }
2445 272 : tevent_req_set_callback(
2446 : subreq, rpc_host_server_setup_done, req);
2447 : }
2448 :
2449 34 : return req;
2450 : }
2451 :
2452 : /*
2453 : * Timer function called after we were initialized but no one
2454 : * connected. Shutdown.
2455 : */
2456 0 : static void rpc_host_shutdown(
2457 : struct tevent_context *ev,
2458 : struct tevent_timer *te,
2459 : struct timeval current_time,
2460 : void *private_data)
2461 : {
2462 0 : struct tevent_req *req = talloc_get_type_abort(
2463 : private_data, struct tevent_req);
2464 0 : DBG_DEBUG("Nobody connected -- shutting down\n");
2465 0 : tevent_req_done(req);
2466 0 : }
2467 :
/*
 * One rpc_server_setup_send finished. Collect the result; once all
 * of them are in, start an accept loop per endpoint, mark ourselves
 * ready, and (in np-helper mode) arm the idle-shutdown timer.
 */
static void rpc_host_server_setup_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_state *state = tevent_req_data(
		req, struct rpc_host_state);
	struct rpc_server *server = NULL;
	struct rpc_host *host = state->host;
	size_t i, num_servers = talloc_array_length(host->servers);
	NTSTATUS status;

	status = rpc_server_setup_recv(subreq, host, &server);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("rpc_server_setup_recv returned %s, ignoring\n",
			  nt_errstr(status));
		/* shrink the expected server count by one and carry on */
		host->servers = talloc_realloc(
			host,
			host->servers,
			struct rpc_server *,
			num_servers-1);
		return;
	}

	/* slot the server in at the next free index */
	server->server_index = state->num_prepared;
	host->servers[state->num_prepared] = server;

	state->num_prepared += 1;

	if (state->num_prepared < num_servers) {
		/* still waiting for other rpcds to finish setup */
		return;
	}

	/* all rpcds prepared: start accepting on every endpoint */
	for (i=0; i<num_servers; i++) {
		size_t j, num_endpoints;

		server = host->servers[i];
		num_endpoints = talloc_array_length(server->endpoints);

		for (j=0; j<num_endpoints; j++) {
			subreq = rpc_host_endpoint_accept_send(
				state, state->ev, server->endpoints[j]);
			if (tevent_req_nomem(subreq, req)) {
				return;
			}
			tevent_req_set_callback(
				subreq, rpc_host_endpoint_failed, req);
		}
	}

	state->is_ready = true;

	if (state->daemon_ready_progname != NULL) {
		daemon_ready(state->daemon_ready_progname);
	}

	if (host->np_helper) {
		/*
		 * If we're started as an np helper, and no one talks to
		 * us within 10 seconds, just shut ourselves down.
		 */
		host->np_helper_shutdown = tevent_add_timer(
			state->ev,
			state,
			timeval_current_ofs(10, 0),
			rpc_host_shutdown,
			req);
		if (tevent_req_nomem(host->np_helper_shutdown, req)) {
			return;
		}
	}

	/* notify any already-queued ready-signal fds */
	tevent_schedule_immediate(
		state->ready_signal_immediate,
		state->ev,
		rpc_host_report_readiness,
		state);
}
2546 :
2547 : /*
2548 : * Log accept fail on an endpoint.
2549 : */
2550 0 : static void rpc_host_endpoint_failed(struct tevent_req *subreq)
2551 : {
2552 0 : struct tevent_req *req = tevent_req_callback_data(
2553 : subreq, struct tevent_req);
2554 0 : struct rpc_host_state *state = tevent_req_data(
2555 : req, struct rpc_host_state);
2556 0 : struct rpc_host_endpoint *endpoint = NULL;
2557 0 : char *binding_string = NULL;
2558 : int ret;
2559 :
2560 0 : ret = rpc_host_endpoint_accept_recv(subreq, &endpoint);
2561 0 : TALLOC_FREE(subreq);
2562 :
2563 0 : binding_string = dcerpc_binding_string(state, endpoint->binding);
2564 0 : DBG_DEBUG("rpc_host_endpoint_accept_recv for %s returned %s\n",
2565 : binding_string,
2566 : strerror(ret));
2567 0 : TALLOC_FREE(binding_string);
2568 0 : }
2569 :
/* Final NTSTATUS of the whole samba-dcerpcd run; frees the request. */
static NTSTATUS rpc_host_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
2574 :
/*
 * Create <piddir>/<progname>.pid. Returns 0 on success (keeping the
 * locked fd open for our lifetime), EAGAIN if another instance
 * already holds it (after forwarding our ready_signal_fd to it), or
 * another errno on failure.
 */
static int rpc_host_pidfile_create(
	struct messaging_context *msg_ctx,
	const char *progname,
	int ready_signal_fd)
{
	const char *piddir = lp_pid_directory();
	/* +6 covers '/', ".pid" and the trailing NUL */
	size_t len = strlen(piddir) + strlen(progname) + 6;
	char pidFile[len];
	pid_t existing_pid;
	int fd, ret;

	snprintf(pidFile,
		 sizeof(pidFile),
		 "%s/%s.pid",
		 piddir, progname);

	ret = pidfile_path_create(pidFile, &fd, &existing_pid);
	if (ret == 0) {
		/* leak fd */
		return 0;
	}

	if (ret != EAGAIN) {
		DBG_DEBUG("pidfile_path_create() failed: %s\n",
			  strerror(ret));
		return ret;
	}

	DBG_DEBUG("%s pid %d exists\n", progname, (int)existing_pid);

	if (ready_signal_fd != -1) {
		/*
		 * Another samba-dcerpcd is already running: hand our
		 * ready-signal fd over so *it* reports readiness to
		 * our invoker (see rpc_host_ready_signal_filter).
		 */
		NTSTATUS status = messaging_send_iov(
			msg_ctx,
			pid_to_procid(existing_pid),
			MSG_DAEMON_READY_FD,
			NULL,
			0,
			&ready_signal_fd,
			1);
		if (!NT_STATUS_IS_OK(status)) {
			DBG_DEBUG("Could not send ready_signal_fd: %s\n",
				  nt_errstr(status));
		}
	}

	return EAGAIN;
}
2622 :
2623 8 : static void samba_dcerpcd_stdin_handler(
2624 : struct tevent_context *ev,
2625 : struct tevent_fd *fde,
2626 : uint16_t flags,
2627 : void *private_data)
2628 : {
2629 8 : struct tevent_req *req = talloc_get_type_abort(
2630 : private_data, struct tevent_req);
2631 : char c;
2632 :
2633 8 : if (read(0, &c, 1) != 1) {
2634 : /* we have reached EOF on stdin, which means the
2635 : parent has exited. Shutdown the server */
2636 8 : tevent_req_done(req);
2637 : }
2638 8 : }
2639 :
2640 : /*
2641 : * samba-dcerpcd microservice startup !
2642 : */
2643 29 : int main(int argc, const char *argv[])
2644 : {
2645 26 : const struct loadparm_substitution *lp_sub =
2646 3 : loadparm_s3_global_substitution();
2647 29 : const char *progname = getprogname();
2648 29 : TALLOC_CTX *frame = NULL;
2649 29 : struct tevent_context *ev_ctx = NULL;
2650 29 : struct messaging_context *msg_ctx = NULL;
2651 29 : struct tevent_req *req = NULL;
2652 29 : char *servers = NULL;
2653 29 : const char *arg = NULL;
2654 : size_t num_servers;
2655 : poptContext pc;
2656 : int ret, err;
2657 : NTSTATUS status;
2658 : bool log_stdout;
2659 : bool ok;
2660 :
2661 29 : int libexec_rpcds = 0;
2662 29 : int np_helper = 0;
2663 29 : int ready_signal_fd = -1;
2664 :
2665 29 : struct samba_cmdline_daemon_cfg *cmdline_daemon_cfg = NULL;
2666 116 : struct poptOption long_options[] = {
2667 : POPT_AUTOHELP
2668 : {
2669 : .longName = "libexec-rpcds",
2670 : .argInfo = POPT_ARG_NONE,
2671 : .arg = &libexec_rpcds,
2672 : .descrip = "Use all rpcds in libexec",
2673 : },
2674 : {
2675 : .longName = "ready-signal-fd",
2676 : .argInfo = POPT_ARG_INT,
2677 : .arg = &ready_signal_fd,
2678 : .descrip = "fd to close when initialized",
2679 : },
2680 : {
2681 : .longName = "np-helper",
2682 : .argInfo = POPT_ARG_NONE,
2683 : .arg = &np_helper,
2684 : .descrip = "Internal named pipe server",
2685 : },
2686 29 : POPT_COMMON_SAMBA
2687 29 : POPT_COMMON_DAEMON
2688 29 : POPT_COMMON_VERSION
2689 : POPT_TABLEEND
2690 : };
2691 :
2692 : {
2693 29 : const char *fd_params[] = { "ready-signal-fd", };
2694 :
2695 29 : closefrom_except_fd_params(
2696 : 3, ARRAY_SIZE(fd_params), fd_params, argc, argv);
2697 : }
2698 :
2699 29 : talloc_enable_null_tracking();
2700 29 : frame = talloc_stackframe();
2701 29 : umask(0);
2702 29 : sec_init();
2703 29 : smb_init_locale();
2704 :
2705 29 : ok = samba_cmdline_init(frame,
2706 : SAMBA_CMDLINE_CONFIG_SERVER,
2707 : true /* require_smbconf */);
2708 29 : if (!ok) {
2709 0 : DBG_ERR("Failed to init cmdline parser!\n");
2710 0 : TALLOC_FREE(frame);
2711 0 : exit(ENOMEM);
2712 : }
2713 :
2714 29 : pc = samba_popt_get_context(getprogname(),
2715 : argc,
2716 : argv,
2717 : long_options,
2718 : 0);
2719 29 : if (pc == NULL) {
2720 0 : DBG_ERR("Failed to setup popt context!\n");
2721 0 : TALLOC_FREE(frame);
2722 0 : exit(1);
2723 : }
2724 :
2725 29 : poptSetOtherOptionHelp(
2726 : pc, "[OPTIONS] [SERVICE_1 SERVICE_2 .. SERVICE_N]");
2727 :
2728 29 : ret = poptGetNextOpt(pc);
2729 :
2730 29 : if (ret != -1) {
2731 0 : if (ret >= 0) {
2732 0 : fprintf(stderr,
2733 : "\nGot unexpected option %d\n",
2734 : ret);
2735 0 : } else if (ret == POPT_ERROR_BADOPT) {
2736 0 : fprintf(stderr,
2737 : "\nInvalid option %s: %s\n\n",
2738 : poptBadOption(pc, 0),
2739 : poptStrerror(ret));
2740 : } else {
2741 0 : fprintf(stderr,
2742 : "\npoptGetNextOpt returned %s\n",
2743 : poptStrerror(ret));
2744 : }
2745 :
2746 0 : poptFreeContext(pc);
2747 0 : TALLOC_FREE(frame);
2748 0 : exit(1);
2749 : }
2750 :
2751 55 : while ((arg = poptGetArg(pc)) != NULL) {
2752 0 : ret = strv_add(frame, &servers, arg);
2753 0 : if (ret != 0) {
2754 0 : DBG_ERR("strv_add() failed\n");
2755 0 : poptFreeContext(pc);
2756 0 : TALLOC_FREE(frame);
2757 0 : exit(1);
2758 : }
2759 : }
2760 :
2761 29 : log_stdout = (debug_get_log_type() == DEBUG_STDOUT);
2762 29 : if (log_stdout) {
2763 8 : setup_logging(progname, DEBUG_STDOUT);
2764 : } else {
2765 21 : setup_logging(progname, DEBUG_FILE);
2766 : }
2767 :
2768 : /*
2769 : * If "rpc start on demand helpers = true" in smb.conf we must
2770 : * not start as standalone, only on demand from
2771 : * local_np_connect() functions. Log an error message telling
2772 : * the admin how to fix and then exit.
2773 : */
2774 29 : if (lp_rpc_start_on_demand_helpers() && np_helper == 0) {
2775 0 : DBG_ERR("Cannot start in standalone mode if smb.conf "
2776 : "[global] setting "
2777 : "\"rpc start on demand helpers = true\" - "
2778 : "exiting\n");
2779 0 : TALLOC_FREE(frame);
2780 0 : exit(1);
2781 : }
2782 :
2783 29 : if (libexec_rpcds != 0) {
2784 29 : ret = rpc_host_list_servers(
2785 : dyn_SAMBA_LIBEXECDIR, frame, &servers);
2786 29 : if (ret != 0) {
2787 0 : DBG_ERR("Could not list libexec: %s\n",
2788 : strerror(ret));
2789 0 : poptFreeContext(pc);
2790 0 : TALLOC_FREE(frame);
2791 0 : exit(1);
2792 : }
2793 : }
2794 :
2795 29 : num_servers = strv_count(servers);
2796 29 : if (num_servers == 0) {
2797 0 : poptPrintUsage(pc, stderr, 0);
2798 0 : poptFreeContext(pc);
2799 0 : TALLOC_FREE(frame);
2800 0 : exit(1);
2801 : }
2802 :
2803 29 : poptFreeContext(pc);
2804 :
2805 29 : cmdline_daemon_cfg = samba_cmdline_get_daemon_cfg();
2806 :
2807 29 : if (log_stdout && cmdline_daemon_cfg->fork) {
2808 0 : DBG_ERR("Can't log to stdout unless in foreground\n");
2809 0 : TALLOC_FREE(frame);
2810 0 : exit(1);
2811 : }
2812 :
2813 29 : msg_ctx = global_messaging_context();
2814 29 : if (msg_ctx == NULL) {
2815 0 : DBG_ERR("messaging_init() failed\n");
2816 0 : TALLOC_FREE(frame);
2817 0 : exit(1);
2818 : }
2819 29 : ev_ctx = messaging_tevent_context(msg_ctx);
2820 :
2821 29 : if (cmdline_daemon_cfg->fork) {
2822 42 : become_daemon(
2823 : true,
2824 21 : cmdline_daemon_cfg->no_process_group,
2825 : log_stdout);
2826 :
2827 29 : status = reinit_after_fork(msg_ctx, ev_ctx, false, NULL);
2828 29 : if (!NT_STATUS_IS_OK(status)) {
2829 0 : exit_daemon("reinit_after_fork() failed",
2830 : map_errno_from_nt_status(status));
2831 : }
2832 : } else {
2833 8 : DBG_DEBUG("Calling daemon_status\n");
2834 8 : daemon_status(progname, "Starting process ... ");
2835 : }
2836 :
2837 37 : BlockSignals(true, SIGPIPE);
2838 :
2839 37 : dump_core_setup(progname, lp_logfile(frame, lp_sub));
2840 :
2841 37 : DEBUG(0, ("%s version %s started.\n",
2842 : progname,
2843 : samba_version_string()));
2844 37 : DEBUGADD(0,("%s\n", COPYRIGHT_STARTUP_MESSAGE));
2845 :
2846 37 : reopen_logs();
2847 :
2848 37 : (void)winbind_off();
2849 37 : ok = init_guest_session_info(frame);
2850 37 : (void)winbind_on();
2851 37 : if (!ok) {
2852 0 : DBG_ERR("init_guest_session_info failed\n");
2853 0 : global_messaging_context_free();
2854 0 : TALLOC_FREE(frame);
2855 0 : exit(1);
2856 : }
2857 :
2858 37 : ret = rpc_host_pidfile_create(msg_ctx, progname, ready_signal_fd);
2859 37 : if (ret != 0) {
2860 3 : DBG_DEBUG("rpc_host_pidfile_create failed: %s\n",
2861 : strerror(ret));
2862 3 : global_messaging_context_free();
2863 3 : TALLOC_FREE(frame);
2864 3 : exit(1);
2865 : }
2866 :
2867 57 : req = rpc_host_send(
2868 : ev_ctx,
2869 : ev_ctx,
2870 : msg_ctx,
2871 : servers,
2872 : ready_signal_fd,
2873 34 : cmdline_daemon_cfg->fork ? NULL : progname,
2874 : np_helper != 0);
2875 34 : if (req == NULL) {
2876 0 : DBG_ERR("rpc_host_send failed\n");
2877 0 : global_messaging_context_free();
2878 0 : TALLOC_FREE(frame);
2879 0 : exit(1);
2880 : }
2881 :
2882 34 : if (!cmdline_daemon_cfg->fork) {
2883 : struct stat st;
2884 8 : if (fstat(0, &st) != 0) {
2885 0 : DBG_DEBUG("fstat(0) failed: %s\n",
2886 : strerror(errno));
2887 0 : global_messaging_context_free();
2888 0 : TALLOC_FREE(frame);
2889 0 : exit(1);
2890 : }
2891 8 : if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) {
2892 8 : tevent_add_fd(
2893 : ev_ctx,
2894 : ev_ctx,
2895 : 0,
2896 : TEVENT_FD_READ,
2897 : samba_dcerpcd_stdin_handler,
2898 : req);
2899 : }
2900 : }
2901 :
2902 34 : ok = tevent_req_poll_unix(req, ev_ctx, &err);
2903 8 : if (!ok) {
2904 0 : DBG_ERR("tevent_req_poll_unix failed: %s\n",
2905 : strerror(err));
2906 0 : global_messaging_context_free();
2907 0 : TALLOC_FREE(frame);
2908 0 : exit(1);
2909 : }
2910 :
2911 8 : status = rpc_host_recv(req);
2912 8 : if (!NT_STATUS_IS_OK(status)) {
2913 0 : DBG_ERR("rpc_host_recv returned %s\n", nt_errstr(status));
2914 0 : global_messaging_context_free();
2915 0 : TALLOC_FREE(frame);
2916 0 : exit(1);
2917 : }
2918 :
2919 8 : TALLOC_FREE(frame);
2920 :
2921 8 : return 0;
2922 : }
|