1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <signal.h>
28 #include <stdio.h>
29
30 #include <asoundlib.h>
31
32 #ifdef HAVE_VALGRIND_MEMCHECK_H
33 #include <valgrind/memcheck.h>
34 #endif
35
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/volume.h>
39 #include <pulse/xmalloc.h>
40 #include <pulse/internal.h>
41
42 #include <pulsecore/core.h>
43 #include <pulsecore/i18n.h>
44 #include <pulsecore/module.h>
45 #include <pulsecore/memchunk.h>
46 #include <pulsecore/sink.h>
47 #include <pulsecore/modargs.h>
48 #include <pulsecore/core-rtclock.h>
49 #include <pulsecore/core-util.h>
50 #include <pulsecore/sample-util.h>
51 #include <pulsecore/log.h>
52 #include <pulsecore/macro.h>
53 #include <pulsecore/thread.h>
54 #include <pulsecore/thread-mq.h>
55 #include <pulsecore/rtpoll.h>
56 #include <pulsecore/time-smoother.h>
57
58 #include <modules/reserve-wrap.h>
59
60 #include "alsa-util.h"
61 #include "alsa-sink.h"
62
63 /* #define DEBUG_TIMING */
64
65 #define DEFAULT_DEVICE "default"
66
67 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
68 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
69
70 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
71 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
72 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a dropout before we recheck whether things are good again */
73 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
74 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
75
76 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
77 * will increase the watermark only if we hit a real underrun. */
78
79 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
80 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wake up at least this long before the buffer runs empty */
81
82 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother window size */
83 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
84
85 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
86 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
87
88 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* Don't require volume adjustments to be perfectly correct; don't necessarily extend granularity in software unless the differences get greater than this level */
89
90 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
91 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms -- depending on channels/rate/sample format we may rewind more than the 256 bytes above */
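/* Illustrative note: PA_VOLUME_NORM is 0x10000 (65536), so
 * VOLUME_ACCURACY above works out to ~655 volume units, i.e. software
 * attenuation within roughly 1% of unity is skipped rather than
 * applied. */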
92
93 struct userdata {
94 pa_core *core;
95 pa_module *module;
96 pa_sink *sink;
97
98 pa_thread *thread;
99 pa_thread_mq thread_mq;
100 pa_rtpoll *rtpoll;
101
102 snd_pcm_t *pcm_handle;
103
104 char *paths_dir;
105 pa_alsa_fdlist *mixer_fdl;
106 pa_alsa_mixer_pdata *mixer_pd;
107 snd_mixer_t *mixer_handle;
108 pa_alsa_path_set *mixer_path_set;
109 pa_alsa_path *mixer_path;
110
111 pa_cvolume hardware_volume;
112
113 unsigned int *rates;
114
115 size_t
116 frame_size,
117 fragment_size,
118 hwbuf_size,
119 tsched_watermark,
120 tsched_watermark_ref,
121 hwbuf_unused,
122 min_sleep,
123 min_wakeup,
124 watermark_inc_step,
125 watermark_dec_step,
126 watermark_inc_threshold,
127 watermark_dec_threshold,
128 rewind_safeguard;
129
130 pa_usec_t watermark_dec_not_before;
131 pa_usec_t min_latency_ref;
132 pa_usec_t tsched_watermark_usec;
133
134 pa_memchunk memchunk;
135
136 char *device_name; /* name of the PCM device */
137 char *control_device; /* name of the control device */
138
139 pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
140
141 pa_bool_t first, after_rewind;
142
143 pa_rtpoll_item *alsa_rtpoll_item;
144
145 pa_smoother *smoother;
146 uint64_t write_count;
147 uint64_t since_start;
148 pa_usec_t smoother_interval;
149 pa_usec_t last_smoother_update;
150
151 pa_idxset *formats;
152
153 pa_reserve_wrapper *reserve;
154 pa_hook_slot *reserve_slot;
155 pa_reserve_monitor_wrapper *monitor;
156 pa_hook_slot *monitor_slot;
157
158 /* ucm context */
159 pa_alsa_ucm_mapping_context *ucm_context;
160 };
161
162 static void userdata_free(struct userdata *u);
163
164 /* FIXME: Is there a better way to do this than device names? */
165 static pa_bool_t is_iec958(struct userdata *u) {
166 return (strncmp("iec958", u->device_name, 6) == 0);
167 }
168
169 static pa_bool_t is_hdmi(struct userdata *u) {
170 return (strncmp("hdmi", u->device_name, 4) == 0);
171 }
172
173 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
174 pa_assert(r);
175 pa_assert(u);
176
177 pa_log_debug("Suspending sink %s, because another application requested us to release the device.", u->sink->name);
178
179 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
180 return PA_HOOK_CANCEL;
181
182 return PA_HOOK_OK;
183 }
184
185 static void reserve_done(struct userdata *u) {
186 pa_assert(u);
187
188 if (u->reserve_slot) {
189 pa_hook_slot_free(u->reserve_slot);
190 u->reserve_slot = NULL;
191 }
192
193 if (u->reserve) {
194 pa_reserve_wrapper_unref(u->reserve);
195 u->reserve = NULL;
196 }
197 }
198
199 static void reserve_update(struct userdata *u) {
200 const char *description;
201 pa_assert(u);
202
203 if (!u->sink || !u->reserve)
204 return;
205
206 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
207 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
208 }
209
210 static int reserve_init(struct userdata *u, const char *dname) {
211 char *rname;
212
213 pa_assert(u);
214 pa_assert(dname);
215
216 if (u->reserve)
217 return 0;
218
219 if (pa_in_system_mode())
220 return 0;
221
222 if (!(rname = pa_alsa_get_reserve_name(dname)))
223 return 0;
224
225 /* We are resuming, try to lock the device */
226 u->reserve = pa_reserve_wrapper_get(u->core, rname);
227 pa_xfree(rname);
228
229 if (!(u->reserve))
230 return -1;
231
232 reserve_update(u);
233
234 pa_assert(!u->reserve_slot);
235 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
236
237 return 0;
238 }
239
240 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
241 pa_assert(w);
242 pa_assert(u);
243
244 if (PA_PTR_TO_UINT(busy) && !u->reserve) {
245 pa_log_debug("Suspending sink %s, because another application is blocking the access to the device.", u->sink->name);
246 pa_sink_suspend(u->sink, true, PA_SUSPEND_APPLICATION);
247 } else {
248 pa_log_debug("Resuming sink %s, because other applications aren't blocking access to the device any more.", u->sink->name);
249 pa_sink_suspend(u->sink, false, PA_SUSPEND_APPLICATION);
250 }
251
252 return PA_HOOK_OK;
253 }
254
255 static void monitor_done(struct userdata *u) {
256 pa_assert(u);
257
258 if (u->monitor_slot) {
259 pa_hook_slot_free(u->monitor_slot);
260 u->monitor_slot = NULL;
261 }
262
263 if (u->monitor) {
264 pa_reserve_monitor_wrapper_unref(u->monitor);
265 u->monitor = NULL;
266 }
267 }
268
269 static int reserve_monitor_init(struct userdata *u, const char *dname) {
270 char *rname;
271
272 pa_assert(u);
273 pa_assert(dname);
274
275 if (pa_in_system_mode())
276 return 0;
277
278 if (!(rname = pa_alsa_get_reserve_name(dname)))
279 return 0;
280
281 /* We are resuming, try to lock the device */
282 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
283 pa_xfree(rname);
284
285 if (!(u->monitor))
286 return -1;
287
288 pa_assert(!u->monitor_slot);
289 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
290
291 return 0;
292 }
293
294 static void fix_min_sleep_wakeup(struct userdata *u) {
295 size_t max_use, max_use_2;
296
297 pa_assert(u);
298 pa_assert(u->use_tsched);
299
300 max_use = u->hwbuf_size - u->hwbuf_unused;
301 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
302
303 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
304 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
305
306 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
307 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
308 }
309
310 static void fix_tsched_watermark(struct userdata *u) {
311 size_t max_use;
312 pa_assert(u);
313 pa_assert(u->use_tsched);
314
315 max_use = u->hwbuf_size - u->hwbuf_unused;
316
317 if (u->tsched_watermark > max_use - u->min_sleep)
318 u->tsched_watermark = max_use - u->min_sleep;
319
320 if (u->tsched_watermark < u->min_wakeup)
321 u->tsched_watermark = u->min_wakeup;
322
323 u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
324 }
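/* Illustrative note: the two helpers above clamp min_sleep/min_wakeup
 * into [frame_size, max_use/2] and the watermark into
 * [min_wakeup, max_use - min_sleep]; e.g. with a 64kB usable buffer
 * and a 4kB min_sleep the watermark can never exceed 60kB nor drop
 * below min_wakeup. */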
325
326 static void increase_watermark(struct userdata *u) {
327 size_t old_watermark;
328 pa_usec_t old_min_latency, new_min_latency;
329
330 pa_assert(u);
331 pa_assert(u->use_tsched);
332
333 /* First, just try to increase the watermark */
334 old_watermark = u->tsched_watermark;
335 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
336 fix_tsched_watermark(u);
337
338 if (old_watermark != u->tsched_watermark) {
339 pa_log_info("Increasing wakeup watermark to %0.2f ms",
340 (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
341 return;
342 }
343
344 /* Hmm, we cannot increase the watermark any further, hence let's
345 raise the latency, unless doing so was disabled in
346 configuration */
347 if (u->fixed_latency_range)
348 return;
349
350 old_min_latency = u->sink->thread_info.min_latency;
351 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
352 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
353
354 if (old_min_latency != new_min_latency) {
355 pa_log_info("Increasing minimal latency to %0.2f ms",
356 (double) new_min_latency / PA_USEC_PER_MSEC);
357
358 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
359 }
360
361 /* When we reach this we're officially fucked! */
362 }
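/* Illustrative note: the growth rule above is "double, but by no more
 * than watermark_inc_step", i.e. small watermarks grow geometrically
 * (1ms -> 2ms -> 4ms ...) until the 10ms step starts to dominate and
 * growth becomes linear. The same double-but-capped rule is applied
 * to min_latency. */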
363
364 static void decrease_watermark(struct userdata *u) {
365 size_t old_watermark;
366 pa_usec_t now;
367
368 pa_assert(u);
369 pa_assert(u->use_tsched);
370
371 now = pa_rtclock_now();
372
373 if (u->watermark_dec_not_before <= 0)
374 goto restart;
375
376 if (u->watermark_dec_not_before > now)
377 return;
378
379 old_watermark = u->tsched_watermark;
380
381 if (u->tsched_watermark < u->watermark_dec_step)
382 u->tsched_watermark = u->tsched_watermark / 2;
383 else
384 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
385
386 fix_tsched_watermark(u);
387
388 if (old_watermark != u->tsched_watermark)
389 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
390 (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
391
392 /* We don't change the latency range */
393
394 restart:
395 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
396 }
397
398 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
399 pa_usec_t usec, wm;
400
401 pa_assert(sleep_usec);
402 pa_assert(process_usec);
403
404 pa_assert(u);
405 pa_assert(u->use_tsched);
406
407 usec = pa_sink_get_requested_latency_within_thread(u->sink);
408
409 if (usec == (pa_usec_t) -1)
410 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
411
412 wm = u->tsched_watermark_usec;
413
414 if (wm > usec)
415 wm = usec/2;
416
417 *sleep_usec = usec - wm;
418 *process_usec = wm;
419
420 #ifdef DEBUG_TIMING
421 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
422 (unsigned long) (usec / PA_USEC_PER_MSEC),
423 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
424 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
425 #endif
426 }
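/* Illustrative example: with a requested latency of 100ms and a 20ms
 * watermark this yields *sleep_usec = 80ms and *process_usec = 20ms;
 * if the watermark ever exceeds the requested latency it is cut down
 * to half of it so the split stays sane. */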
427
428 static int try_recover(struct userdata *u, const char *call, int err) {
429 pa_assert(u);
430 pa_assert(call);
431 pa_assert(err < 0);
432
433 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
434
435 pa_assert(err != -EAGAIN);
436
437 if (err == -EPIPE)
438 pa_log_debug("%s: Buffer underrun!", call);
439
440 if (err == -ESTRPIPE)
441 pa_log_debug("%s: System suspended!", call);
442
443 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
444 pa_log("%s: %s", call, pa_alsa_strerror(err));
445 return -1;
446 }
447
448 u->first = TRUE;
449 u->since_start = 0;
450 return 0;
451 }
452
453 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
454 size_t left_to_play;
455 pa_bool_t underrun = FALSE;
456
457 /* We use <= instead of < for this check here because an underrun
458 * only happens after the last sample was processed, not when it
459 * is merely removed from the buffer. This is particularly important
460 * when block transfer is used. */
461
462 if (n_bytes <= u->hwbuf_size)
463 left_to_play = u->hwbuf_size - n_bytes;
464 else {
465
466 /* We got a dropout. What a mess! */
467 left_to_play = 0;
468 underrun = TRUE;
469
470 #if 0
471 PA_DEBUG_TRAP;
472 #endif
473
474 if (!u->first && !u->after_rewind)
475 if (pa_log_ratelimit(PA_LOG_INFO))
476 pa_log_info("Underrun!");
477 }
478
479 #ifdef DEBUG_TIMING
480 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
481 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
482 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
483 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
484 #endif
485
486 if (u->use_tsched) {
487 pa_bool_t reset_not_before = TRUE;
488
489 if (!u->first && !u->after_rewind) {
490 if (underrun || left_to_play < u->watermark_inc_threshold)
491 increase_watermark(u);
492 else if (left_to_play > u->watermark_dec_threshold) {
493 reset_not_before = FALSE;
494
495 /* We decrease the watermark only if have actually
496 * been woken up by a timeout. If something else woke
497 * us up it's too easy to fulfill the deadlines... */
498
499 if (on_timeout)
500 decrease_watermark(u);
501 }
502 }
503
504 if (reset_not_before)
505 u->watermark_dec_not_before = 0;
506 }
507
508 return left_to_play;
509 }
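/* Illustrative note: the thresholds above form a hysteresis -- the
 * watermark is raised as soon as the fill level drops below the inc
 * threshold (0 by default, i.e. only on a real underrun), but it is
 * lowered only after the fill level has stayed above the dec
 * threshold for TSCHED_WATERMARK_VERIFY_AFTER_USEC. */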
510
511 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
512 pa_bool_t work_done = FALSE;
513 pa_usec_t max_sleep_usec = 0, process_usec = 0;
514 size_t left_to_play, input_underrun;
515 unsigned j = 0;
516
517 pa_assert(u);
518 pa_sink_assert_ref(u->sink);
519
520 if (u->use_tsched)
521 hw_sleep_time(u, &max_sleep_usec, &process_usec);
522
523 for (;;) {
524 snd_pcm_sframes_t n;
525 size_t n_bytes;
526 int r;
527 pa_bool_t after_avail = TRUE;
528
529 /* First we determine how many samples are missing to fill the
530 * buffer up to 100% */
531
532 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
533
534 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
535 continue;
536
537 return r;
538 }
539
540 n_bytes = (size_t) n * u->frame_size;
541
542 #ifdef DEBUG_TIMING
543 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
544 #endif
545
546 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
547 on_timeout = FALSE;
548
549 if (u->use_tsched)
550
551 /* We won't fill up the playback buffer before at least
552 * half the sleep time is over because otherwise we might
553 * ask for more data from the clients than they expect. We
554 * need to guarantee that clients only have to keep around
555 * a single hw buffer length. */
556
557 if (!polled &&
558 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
559 #ifdef DEBUG_TIMING
560 pa_log_debug("Not filling up, because too early.");
561 #endif
562 break;
563 }
564
565 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
566
567 if (polled)
568 PA_ONCE_BEGIN {
569 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
570 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
571 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
572 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
573 pa_strnull(dn));
574 pa_xfree(dn);
575 } PA_ONCE_END;
576
577 #ifdef DEBUG_TIMING
578 pa_log_debug("Not filling up, because not necessary.");
579 #endif
580 break;
581 }
582
583
584 if (++j > 10) {
585 #ifdef DEBUG_TIMING
586 pa_log_debug("Not filling up, because already too many iterations.");
587 #endif
588
589 break;
590 }
591
592 n_bytes -= u->hwbuf_unused;
593 polled = FALSE;
594
595 #ifdef DEBUG_TIMING
596 pa_log_debug("Filling up");
597 #endif
598
599 for (;;) {
600 pa_memchunk chunk;
601 void *p;
602 int err;
603 const snd_pcm_channel_area_t *areas;
604 snd_pcm_uframes_t offset, frames;
605 snd_pcm_sframes_t sframes;
606 size_t written;
607
608 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
609 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
610
611 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
612
613 if (!after_avail && err == -EAGAIN)
614 break;
615
616 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
617 continue;
618
619 return r;
620 }
621
622 /* Make sure that if these memblocks need to be copied they will fit into one slot */
623 if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
624 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
625
626 if (!after_avail && frames == 0)
627 break;
628
629 pa_assert(frames > 0);
630 after_avail = FALSE;
631
632 /* Check these are multiples of 8 bit */
633 pa_assert((areas[0].first & 7) == 0);
634 pa_assert((areas[0].step & 7) == 0);
635
636 /* We assume a single interleaved memory buffer */
637 pa_assert((areas[0].first >> 3) == 0);
638 pa_assert((areas[0].step >> 3) == u->frame_size);
639
640 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
641
642 written = frames * u->frame_size;
643 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, written, TRUE);
644 chunk.length = pa_memblock_get_length(chunk.memblock);
645 chunk.index = 0;
646
647 pa_sink_render_into_full(u->sink, &chunk);
648 pa_memblock_unref_fixed(chunk.memblock);
649
650 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
651
652 if (!after_avail && (int) sframes == -EAGAIN)
653 break;
654
655 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
656 continue;
657
658 return r;
659 }
660
661 work_done = TRUE;
662
663 u->write_count += written;
664 u->since_start += written;
665
666 #ifdef DEBUG_TIMING
667 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) written, (unsigned long) n_bytes);
668 #endif
669
670 if (written >= n_bytes)
671 break;
672
673 n_bytes -= written;
674 }
675 }
676
677 input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);
678
679 if (u->use_tsched) {
680 pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);
681
682 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
683 process_usec = u->tsched_watermark_usec;
684
685 if (*sleep_usec > process_usec)
686 *sleep_usec -= process_usec;
687 else
688 *sleep_usec = 0;
689
690 *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
691 } else
692 *sleep_usec = 0;
693
694 return work_done ? 1 : 0;
695 }
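/* Illustrative note: the mmap path above is zero-copy --
 * pa_memblock_new_fixed() wraps the DMA area returned by
 * snd_pcm_mmap_begin(), the sink renders straight into it, and
 * snd_pcm_mmap_commit() publishes exactly the frames written. */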
696
697 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
698 pa_bool_t work_done = FALSE;
699 pa_usec_t max_sleep_usec = 0, process_usec = 0;
700 size_t left_to_play, input_underrun;
701 unsigned j = 0;
702
703 pa_assert(u);
704 pa_sink_assert_ref(u->sink);
705
706 if (u->use_tsched)
707 hw_sleep_time(u, &max_sleep_usec, &process_usec);
708
709 for (;;) {
710 snd_pcm_sframes_t n;
711 size_t n_bytes;
712 int r;
713 pa_bool_t after_avail = TRUE;
714
715 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
716
717 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
718 continue;
719
720 return r;
721 }
722
723 n_bytes = (size_t) n * u->frame_size;
724
725
726 #ifdef DEBUG_TIMING
727 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
728 #endif
729
730 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
731 on_timeout = FALSE;
732
733 if (u->use_tsched)
734
735 /* We won't fill up the playback buffer before at least
736 * half the sleep time is over because otherwise we might
737 * ask for more data from the clients than they expect. We
738 * need to guarantee that clients only have to keep around
739 * a single hw buffer length. */
740
741 if (!polled &&
742 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
743 break;
744
745 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
746
747 if (polled)
748 PA_ONCE_BEGIN {
749 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
750 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
751 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
752 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
753 pa_strnull(dn));
754 pa_xfree(dn);
755 } PA_ONCE_END;
756
757 break;
758 }
759
760 if (++j > 10) {
761 #ifdef DEBUG_TIMING
762 pa_log_debug("Not filling up, because already too many iterations.");
763 #endif
764
765 break;
766 }
767
768 n_bytes -= u->hwbuf_unused;
769 polled = FALSE;
770
771 for (;;) {
772 snd_pcm_sframes_t frames;
773 void *p;
774 size_t written;
775
776 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
777
778 if (u->memchunk.length <= 0)
779 pa_sink_render(u->sink, n_bytes, &u->memchunk);
780
781 pa_assert(u->memchunk.length > 0);
782
783 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
784
785 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
786 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
787
788 p = pa_memblock_acquire(u->memchunk.memblock);
789 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
790 pa_memblock_release(u->memchunk.memblock);
791
792 if (PA_UNLIKELY(frames < 0)) {
793
794 if (!after_avail && (int) frames == -EAGAIN)
795 break;
796
797 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
798 continue;
799
800 return r;
801 }
802
803 if (!after_avail && frames == 0)
804 break;
805
806 pa_assert(frames > 0);
807 after_avail = FALSE;
808
809 written = frames * u->frame_size;
810 u->memchunk.index += written;
811 u->memchunk.length -= written;
812
813 if (u->memchunk.length <= 0) {
814 pa_memblock_unref(u->memchunk.memblock);
815 pa_memchunk_reset(&u->memchunk);
816 }
817
818 work_done = TRUE;
819
820 u->write_count += written;
821 u->since_start += written;
822
823 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
824
825 if (written >= n_bytes)
826 break;
827
828 n_bytes -= written;
829 }
830 }
831
832 input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);
833
834 if (u->use_tsched) {
835 pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);
836
837 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
838 process_usec = u->tsched_watermark_usec;
839
840 if (*sleep_usec > process_usec)
841 *sleep_usec -= process_usec;
842 else
843 *sleep_usec = 0;
844
845 *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
846 } else
847 *sleep_usec = 0;
848
849 return work_done ? 1 : 0;
850 }
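/* Illustrative note: unlike the mmap path, unix_write() renders into
 * an intermediate memchunk and lets snd_pcm_writei() copy it into the
 * device buffer; a partial write simply leaves the remainder queued
 * in u->memchunk for the next iteration. */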
851
852 static void update_smoother(struct userdata *u) {
853 snd_pcm_sframes_t delay = 0;
854 int64_t position;
855 int err;
856 pa_usec_t now1 = 0, now2;
857 snd_pcm_status_t *status;
858 snd_htimestamp_t htstamp = { 0, 0 };
859
860 snd_pcm_status_alloca(&status);
861
862 pa_assert(u);
863 pa_assert(u->pcm_handle);
864
865 /* Let's update the time smoother */
866
867 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
868 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
869 return;
870 }
871
872 snd_pcm_status_get_htstamp(status, &htstamp);
873 now1 = pa_timespec_load(&htstamp);
874
875 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
876 if (now1 <= 0)
877 now1 = pa_rtclock_now();
878
879 /* check if the time since the last update is bigger than the interval */
880 if (u->last_smoother_update > 0)
881 if (u->last_smoother_update + u->smoother_interval > now1)
882 return;
883
884 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
885
886 if (PA_UNLIKELY(position < 0))
887 position = 0;
888
889 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
890
891 pa_smoother_put(u->smoother, now1, now2);
892
893 u->last_smoother_update = now1;
894 /* exponentially increase the update interval up to the MAX limit */
895 u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
896 }
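/* Illustrative example: if 1000000 bytes have been written and ALSA
 * reports a delay of 1000 frames at 4 bytes per frame, the card has
 * played 1000000 - 4000 = 996000 bytes; that byte position, converted
 * to usec, is what gets fed to the smoother together with the DSP
 * timestamp. */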
897
898 static pa_usec_t sink_get_latency(struct userdata *u) {
899 pa_usec_t r;
900 int64_t delay;
901 pa_usec_t now1, now2;
902
903 pa_assert(u);
904
905 now1 = pa_rtclock_now();
906 now2 = pa_smoother_get(u->smoother, now1);
907
908 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
909
910 r = delay >= 0 ? (pa_usec_t) delay : 0;
911
912 if (u->memchunk.memblock)
913 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
914
915 return r;
916 }
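/* Illustrative note: latency here is "bytes handed to ALSA" minus
 * "bytes the smoother estimates as already played", plus whatever is
 * still queued in u->memchunk on the non-mmap write path. */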
917
918 static int build_pollfd(struct userdata *u) {
919 pa_assert(u);
920 pa_assert(u->pcm_handle);
921
922 if (u->alsa_rtpoll_item)
923 pa_rtpoll_item_free(u->alsa_rtpoll_item);
924
925 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
926 return -1;
927
928 return 0;
929 }
930
931 /* Called from IO context */
932 static int suspend(struct userdata *u) {
933 pa_assert(u);
934 pa_assert(u->pcm_handle);
935
936 pa_smoother_pause(u->smoother, pa_rtclock_now());
937
938 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
939 * take awfully long with our long buffer sizes today. */
940 snd_pcm_close(u->pcm_handle);
941 u->pcm_handle = NULL;
942
943 if (u->alsa_rtpoll_item) {
944 pa_rtpoll_item_free(u->alsa_rtpoll_item);
945 u->alsa_rtpoll_item = NULL;
946 }
947
948 /* We reset max_rewind/max_request here to make sure that the
949 * max_request/max_rewind values set before the suspend cannot
950 * influence the per-stream buffers of streams created while we
951 * are suspended, since those old requirements no longer
952 * apply. */
953 pa_sink_set_max_rewind_within_thread(u->sink, 0);
954 pa_sink_set_max_request_within_thread(u->sink, 0);
955
956 pa_log_info("Device suspended...");
957
958 return 0;
959 }
960
961 /* Called from IO context */
962 static int update_sw_params(struct userdata *u) {
963 snd_pcm_uframes_t avail_min;
964 int err;
965
966 pa_assert(u);
967
968 /* Use the full buffer if no one asked us for anything specific */
969 u->hwbuf_unused = 0;
970
971 if (u->use_tsched) {
972 pa_usec_t latency;
973
974 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
975 size_t b;
976
977 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
978
979 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
980
981 /* We need at least one sample in our buffer */
982
983 if (PA_UNLIKELY(b < u->frame_size))
984 b = u->frame_size;
985
986 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
987 }
988
989 fix_min_sleep_wakeup(u);
990 fix_tsched_watermark(u);
991 }
992
993 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
994
995 /* We need at least one frame in the used part of the buffer */
996 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
997
998 if (u->use_tsched) {
999 pa_usec_t sleep_usec, process_usec;
1000
1001 hw_sleep_time(u, &sleep_usec, &process_usec);
1002 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
1003 }
1004
1005 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
1006
1007 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
1008 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
1009 return err;
1010 }
1011
1012 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
1013 if (pa_alsa_pcm_is_hw(u->pcm_handle))
1014 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
1015 else {
1016 pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
1017 pa_sink_set_max_rewind_within_thread(u->sink, 0);
1018 }
1019
1020 return 0;
1021 }
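/* Illustrative note: in tsched mode avail_min is pushed out by the
 * computed sleep time, so POLLOUT from the kernel acts only as a
 * safety net -- the rtpoll timer is the normal wakeup source. */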
1022
1023 /* Called from IO Context on unsuspend or from main thread when creating sink */
1024 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
1025 pa_bool_t in_thread) {
1026 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
1027 &u->sink->sample_spec);
1028
1029 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
1030 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
1031
1032 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
1033 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
1034
1035 fix_min_sleep_wakeup(u);
1036 fix_tsched_watermark(u);
1037
1038 if (in_thread)
1039 pa_sink_set_latency_range_within_thread(u->sink,
1040 u->min_latency_ref,
1041 pa_bytes_to_usec(u->hwbuf_size, ss));
1042 else {
1043 pa_sink_set_latency_range(u->sink,
1044 0,
1045 pa_bytes_to_usec(u->hwbuf_size, ss));
1046
1047 /* work around an assert in pa_sink_set_latency_range_within_thread,
1048 keep track of min_latency and reuse it when
1049 this routine is called from IO context */
1050 u->min_latency_ref = u->sink->thread_info.min_latency;
1051 }
1052
1053 pa_log_info("Time scheduling watermark is %0.2fms",
1054 (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
1055 }
1056
1057 /* Called from IO context */
1058 static int unsuspend(struct userdata *u) {
1059 pa_sample_spec ss;
1060 int err;
1061 pa_bool_t b, d;
1062 snd_pcm_uframes_t period_size, buffer_size;
1063 char *device_name = NULL;
1064
1065 pa_assert(u);
1066 pa_assert(!u->pcm_handle);
1067
1068 pa_log_info("Trying resume...");
1069
1070 if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
1071 /* Need to open device in NONAUDIO mode */
1072 int len = strlen(u->device_name) + 8;
1073
1074 device_name = pa_xmalloc(len);
1075 pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
1076 }
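/* Illustrative note: ",AES0=6" sets IEC958 channel-status byte 0 so
 * the stream is flagged as non-audio in consumer mode, telling
 * receivers to treat it as compressed data (e.g. AC3) rather than
 * PCM samples. */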
1077
1078 if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
1079 SND_PCM_NONBLOCK|
1080 SND_PCM_NO_AUTO_RESAMPLE|
1081 SND_PCM_NO_AUTO_CHANNELS|
1082 SND_PCM_NO_AUTO_FORMAT)) < 0) {
1083 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
1084 goto fail;
1085 }
1086
1087 ss = u->sink->sample_spec;
1088 period_size = u->fragment_size / u->frame_size;
1089 buffer_size = u->hwbuf_size / u->frame_size;
1090 b = u->use_mmap;
1091 d = u->use_tsched;
1092
1093 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
1094 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
1095 goto fail;
1096 }
1097
1098 if (b != u->use_mmap || d != u->use_tsched) {
1099 pa_log_warn("Resume failed, couldn't get original access mode.");
1100 goto fail;
1101 }
1102
1103 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
1104 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1105 goto fail;
1106 }
1107
1108 if (period_size*u->frame_size != u->fragment_size ||
1109 buffer_size*u->frame_size != u->hwbuf_size) {
1110 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1111 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
1112 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
1113 goto fail;
1114 }
1115
1116 if (update_sw_params(u) < 0)
1117 goto fail;
1118
1119 if (build_pollfd(u) < 0)
1120 goto fail;
1121
1122 u->write_count = 0;
1123 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1124 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1125 u->last_smoother_update = 0;
1126
1127 u->first = TRUE;
1128 u->since_start = 0;
1129
1130 /* reset the watermark to the value defined when the sink was created */
1131 if (u->use_tsched)
1132 reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, TRUE);
1133
1134 pa_log_info("Resumed successfully...");
1135
1136 pa_xfree(device_name);
1137 return 0;
1138
1139 fail:
1140 if (u->pcm_handle) {
1141 snd_pcm_close(u->pcm_handle);
1142 u->pcm_handle = NULL;
1143 }
1144
1145 pa_xfree(device_name);
1146
1147 return -PA_ERR_IO;
1148 }
1149
1150 /* Called from IO context */
1151 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1152 struct userdata *u = PA_SINK(o)->userdata;
1153
1154 switch (code) {
1155
1156 case PA_SINK_MESSAGE_GET_LATENCY: {
1157 pa_usec_t r = 0;
1158
1159 if (u->pcm_handle)
1160 r = sink_get_latency(u);
1161
1162 *((pa_usec_t*) data) = r;
1163
1164 return 0;
1165 }
1166
1167 case PA_SINK_MESSAGE_SET_STATE:
1168
1169 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1170
1171 case PA_SINK_SUSPENDED: {
1172 int r;
1173
1174 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1175
1176 if ((r = suspend(u)) < 0)
1177 return r;
1178
1179 break;
1180 }
1181
1182 case PA_SINK_IDLE:
1183 case PA_SINK_RUNNING: {
1184 int r;
1185
1186 if (u->sink->thread_info.state == PA_SINK_INIT) {
1187 if (build_pollfd(u) < 0)
1188 return -PA_ERR_IO;
1189 }
1190
1191 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1192 if ((r = unsuspend(u)) < 0)
1193 return r;
1194 }
1195
1196 break;
1197 }
1198
1199 case PA_SINK_UNLINKED:
1200 case PA_SINK_INIT:
1201 case PA_SINK_INVALID_STATE:
1202 ;
1203 }
1204
1205 break;
1206 }
1207
1208 return pa_sink_process_msg(o, code, data, offset, chunk);
1209 }
1210
1211 /* Called from main context */
1212 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1213 pa_sink_state_t old_state;
1214 struct userdata *u;
1215
1216 pa_sink_assert_ref(s);
1217 pa_assert_se(u = s->userdata);
1218
1219 old_state = pa_sink_get_state(u->sink);
1220
1221 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1222 reserve_done(u);
1223 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1224 if (reserve_init(u, u->device_name) < 0)
1225 return -PA_ERR_BUSY;
1226
1227 return 0;
1228 }
1229
1230 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1231 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1232
1233 pa_assert(u);
1234 pa_assert(u->mixer_handle);
1235
1236 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1237 return 0;
1238
1239 if (!PA_SINK_IS_LINKED(u->sink->state))
1240 return 0;
1241
1242 if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
1243 pa_sink_set_mixer_dirty(u->sink, TRUE);
1244 return 0;
1245 }
1246
1247 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1248 pa_sink_get_volume(u->sink, TRUE);
1249 pa_sink_get_mute(u->sink, TRUE);
1250 }
1251
1252 return 0;
1253 }
1254
1255 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1256 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1257
1258 pa_assert(u);
1259 pa_assert(u->mixer_handle);
1260
1261 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1262 return 0;
1263
1264 if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
1265 pa_sink_set_mixer_dirty(u->sink, TRUE);
1266 return 0;
1267 }
1268
1269 if (mask & SND_CTL_EVENT_MASK_VALUE)
1270 pa_sink_update_volume_and_mute(u->sink);
1271
1272 return 0;
1273 }
1274
1275 static void sink_get_volume_cb(pa_sink *s) {
1276 struct userdata *u = s->userdata;
1277 pa_cvolume r;
1278 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1279
1280 pa_assert(u);
1281 pa_assert(u->mixer_path);
1282 pa_assert(u->mixer_handle);
1283
1284 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1285 return;
1286
1287 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1288 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1289
1290 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1291
1292 if (u->mixer_path->has_dB) {
1293 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1294
1295 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1296 }
1297
1298 if (pa_cvolume_equal(&u->hardware_volume, &r))
1299 return;
1300
1301 s->real_volume = u->hardware_volume = r;
1302
1303 /* Hmm, so the hardware volume changed, let's reset our software volume */
1304 if (u->mixer_path->has_dB)
1305 pa_sink_set_soft_volume(s, NULL);
1306 }
1307
1308 static void sink_set_volume_cb(pa_sink *s) {
1309 struct userdata *u = s->userdata;
1310 pa_cvolume r;
1311 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1312 pa_bool_t deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);
1313
1314 pa_assert(u);
1315 pa_assert(u->mixer_path);
1316 pa_assert(u->mixer_handle);
1317
1318 /* Shift up by the base volume */
1319 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1320
1321 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1322 return;
1323
1324 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1325 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1326
1327 u->hardware_volume = r;
1328
1329 if (u->mixer_path->has_dB) {
1330 pa_cvolume new_soft_volume;
1331 pa_bool_t accurate_enough;
1332 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1333
1334 /* Match exactly what the user requested by software */
1335 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1336
1337 /* If the adjustment to be done in software is only minimal we
1338 * can skip it. That saves us CPU at the expense of a bit of
1339 * accuracy */
1340 accurate_enough =
1341 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1342 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1343
1344 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1345 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1346 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1347 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1348 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1349 pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1350 pa_yes_no(accurate_enough));
1351 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1352
1353 if (!accurate_enough)
1354 s->soft_volume = new_soft_volume;
1355
1356 } else {
1357 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1358
1359 /* We can't match exactly what the user requested, hence let's
1360 * at least tell the user about it */
1361
1362 s->real_volume = r;
1363 }
1364 }
1365
1366 static void sink_write_volume_cb(pa_sink *s) {
1367 struct userdata *u = s->userdata;
1368 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1369
1370 pa_assert(u);
1371 pa_assert(u->mixer_path);
1372 pa_assert(u->mixer_handle);
1373 pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);
1374
1375 /* Shift up by the base volume */
1376 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1377
1378 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1379 pa_log_error("Writing HW volume failed");
1380 else {
1381 pa_cvolume tmp_vol;
1382 pa_bool_t accurate_enough;
1383
1384 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1385 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1386
1387 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1388 accurate_enough =
1389 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1390 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1391
1392 if (!accurate_enough) {
1393 union {
1394 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1395 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1396 } vol;
1397
1398 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1399 pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1400 pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1401 pa_log_debug(" in dB: %s (request) != %s",
1402 pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1403 pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1404 }
1405 }
1406 }
1407
1408 static void sink_get_mute_cb(pa_sink *s) {
1409 struct userdata *u = s->userdata;
1410 pa_bool_t b;
1411
1412 pa_assert(u);
1413 pa_assert(u->mixer_path);
1414 pa_assert(u->mixer_handle);
1415
1416 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1417 return;
1418
1419 s->muted = b;
1420 }
1421
1422 static void sink_set_mute_cb(pa_sink *s) {
1423 struct userdata *u = s->userdata;
1424
1425 pa_assert(u);
1426 pa_assert(u->mixer_path);
1427 pa_assert(u->mixer_handle);
1428
1429 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1430 }
1431
1432 static void mixer_volume_init(struct userdata *u) {
1433 pa_assert(u);
1434
1435 if (!u->mixer_path->has_volume) {
1436 pa_sink_set_write_volume_callback(u->sink, NULL);
1437 pa_sink_set_get_volume_callback(u->sink, NULL);
1438 pa_sink_set_set_volume_callback(u->sink, NULL);
1439
1440 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1441 } else {
1442 pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
1443 pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
1444
1445 if (u->mixer_path->has_dB && u->deferred_volume) {
1446 pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
1447 pa_log_info("Successfully enabled deferred volume.");
1448 } else
1449 pa_sink_set_write_volume_callback(u->sink, NULL);
1450
1451 if (u->mixer_path->has_dB) {
1452 pa_sink_enable_decibel_volume(u->sink, TRUE);
1453 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1454
1455 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1456 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1457
1458 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1459 } else {
1460 pa_sink_enable_decibel_volume(u->sink, FALSE);
1461 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1462
1463 u->sink->base_volume = PA_VOLUME_NORM;
1464 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1465 }
1466
1467 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1468 }
1469
1470 if (!u->mixer_path->has_mute) {
1471 pa_sink_set_get_mute_callback(u->sink, NULL);
1472 pa_sink_set_set_mute_callback(u->sink, NULL);
1473 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1474 } else {
1475 pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
1476 pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1477 pa_log_info("Using hardware mute control.");
1478 }
1479 }
1480
1481 static int sink_set_port_ucm_cb(pa_sink *s, pa_device_port *p) {
1482 struct userdata *u = s->userdata;
1483
1484 pa_assert(u);
1485 pa_assert(p);
1486 pa_assert(u->ucm_context);
1487
1488 return pa_alsa_ucm_set_port(u->ucm_context, p, TRUE);
1489 }
1490
1491 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1492 struct userdata *u = s->userdata;
1493 pa_alsa_port_data *data;
1494
1495 pa_assert(u);
1496 pa_assert(p);
1497 pa_assert(u->mixer_handle);
1498
1499 data = PA_DEVICE_PORT_DATA(p);
1500
1501 pa_assert_se(u->mixer_path = data->path);
1502 pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);
1503
1504 mixer_volume_init(u);
1505
1506 if (s->set_mute)
1507 s->set_mute(s);
1508 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
1509 if (s->write_volume)
1510 s->write_volume(s);
1511 } else {
1512 if (s->set_volume)
1513 s->set_volume(s);
1514 }
1515
1516 return 0;
1517 }
1518
1519 static void sink_update_requested_latency_cb(pa_sink *s) {
1520 struct userdata *u = s->userdata;
1521 size_t before;
1522 pa_assert(u);
1523 pa_assert(u->use_tsched); /* only when timer scheduling is used
1524 * can we dynamically adjust the
1525 * latency */
1526
1527 if (!u->pcm_handle)
1528 return;
1529
1530 before = u->hwbuf_unused;
1531 update_sw_params(u);
1532
1533 /* Let's check whether we now use only a smaller part of the
1534 buffer than before. If so, we need to make sure that subsequent
1535 rewinds are relative to the new maximum fill level and not to the
1536 current fill level. Thus, let's do a full rewind once, to clear
1537 things up. */
1538
1539 if (u->hwbuf_unused > before) {
1540 pa_log_debug("Requesting rewind due to latency change.");
1541 pa_sink_request_rewind(s, (size_t) -1);
1542 }
1543 }
1544
1545 static pa_idxset* sink_get_formats(pa_sink *s) {
1546 struct userdata *u = s->userdata;
1547 pa_idxset *ret = pa_idxset_new(NULL, NULL);
1548 pa_format_info *f;
1549 uint32_t idx;
1550
1551 pa_assert(u);
1552
1553 PA_IDXSET_FOREACH(f, u->formats, idx) {
1554 pa_idxset_put(ret, pa_format_info_copy(f), NULL);
1555 }
1556
1557 return ret;
1558 }
1559
1560 static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
1561 struct userdata *u = s->userdata;
1562 pa_format_info *f, *g;
1563 uint32_t idx, n;
1564
1565 pa_assert(u);
1566
1567 /* FIXME: also validate sample rates against what the device supports */
1568 PA_IDXSET_FOREACH(f, formats, idx) {
1569 if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
1570 /* EAC3 cannot be sent over S/PDIF */
1571 return FALSE;
1572 }
1573
1574 pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);
1575 u->formats = pa_idxset_new(NULL, NULL);
1576
1577 /* Note: the logic below won't apply if we're using software encoding.
1578 * This is fine for now since we don't support that via the passthrough
1579 * framework, but this must be changed if we do. */
1580
1581 /* Count how many sample rates we support */
1582 for (idx = 0, n = 0; u->rates[idx]; idx++)
1583 n++;
1584
1585 /* First insert non-PCM formats since we prefer those. */
1586 PA_IDXSET_FOREACH(f, formats, idx) {
1587 if (!pa_format_info_is_pcm(f)) {
1588 g = pa_format_info_copy(f);
1589 pa_format_info_set_prop_int_array(g, PA_PROP_FORMAT_RATE, (int *) u->rates, n);
1590 pa_idxset_put(u->formats, g, NULL);
1591 }
1592 }
1593
1594 /* Now add any PCM formats */
1595 PA_IDXSET_FOREACH(f, formats, idx) {
1596 if (pa_format_info_is_pcm(f)) {
1597 /* We don't set rates here since we'll just tack on a resampler for
1598 * unsupported rates */
1599 pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
1600 }
1601 }
1602
1603 return TRUE;
1604 }
1605
1606 static pa_bool_t sink_update_rate_cb(pa_sink *s, uint32_t rate) {
1607 struct userdata *u = s->userdata;
1608 int i;
1609 pa_bool_t supported = FALSE;
1610
1611 pa_assert(u);
1612
1613 for (i = 0; u->rates[i]; i++) {
1614 if (u->rates[i] == rate) {
1615 supported = TRUE;
1616 break;
1617 }
1618 }
1619
1620 if (!supported) {
1621 pa_log_info("Sink does not support sample rate of %d Hz", rate);
1622 return FALSE;
1623 }
1624
1625 if (!PA_SINK_IS_OPENED(s->state)) {
1626 pa_log_info("Updating rate for device %s, new rate is %d",u->device_name, rate);
1627 u->sink->sample_spec.rate = rate;
1628 return TRUE;
1629 }
1630
1631 return FALSE;
1632 }
1633
1634 static int process_rewind(struct userdata *u) {
1635 snd_pcm_sframes_t unused;
1636 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1637 pa_assert(u);
1638
1639 if (!PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1640 pa_sink_process_rewind(u->sink, 0);
1641 return 0;
1642 }
1643
1644 /* Figure out how much we shall rewind and reset the counter */
1645 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1646
1647 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1648
1649 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1650 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1651 return -1;
1652 }
1653
1654 unused_nbytes = (size_t) unused * u->frame_size;
1655
1656 /* make sure the rewind doesn't go too far -- rewinding too much can cause issues with DMAs */
1657 unused_nbytes += u->rewind_safeguard;
1658
1659 if (u->hwbuf_size > unused_nbytes)
1660 limit_nbytes = u->hwbuf_size - unused_nbytes;
1661 else
1662 limit_nbytes = 0;
1663
1664 if (rewind_nbytes > limit_nbytes)
1665 rewind_nbytes = limit_nbytes;
1666
1667 if (rewind_nbytes > 0) {
1668 snd_pcm_sframes_t in_frames, out_frames;
1669
1670 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1671
1672 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1673 pa_log_debug("before: %lu", (unsigned long) in_frames);
1674 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1675 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1676 if (try_recover(u, "process_rewind", out_frames) < 0)
1677 return -1;
1678 out_frames = 0;
1679 }
1680
1681 pa_log_debug("after: %lu", (unsigned long) out_frames);
1682
1683 rewind_nbytes = (size_t) out_frames * u->frame_size;
1684
1685 if (rewind_nbytes <= 0)
1686 pa_log_info("Tried rewind, but was apparently not possible.");
1687 else {
1688 u->write_count -= rewind_nbytes;
1689 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1690 pa_sink_process_rewind(u->sink, rewind_nbytes);
1691
1692 u->after_rewind = TRUE;
1693 return 0;
1694 }
1695 } else
1696 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1697
1698 pa_sink_process_rewind(u->sink, 0);
1699 return 0;
1700 }
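/* Illustrative example: with a 64kB hardware buffer, 16kB reported
 * free by snd_pcm_avail() and the 256-byte safeguard, at most
 * 65536 - 16384 - 256 = 48896 bytes can be rewound, however much the
 * sink asked for. */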
1701
1702 static void thread_func(void *userdata) {
1703 struct userdata *u = userdata;
1704 unsigned short revents = 0;
1705
1706 pa_assert(u);
1707
1708 pa_log_debug("Thread starting up");
1709
1710 if (u->core->realtime_scheduling)
1711 pa_make_realtime(u->core->realtime_priority);
1712
1713 pa_thread_mq_install(&u->thread_mq);
1714
1715 for (;;) {
1716 int ret;
1717 pa_usec_t rtpoll_sleep = 0, real_sleep;
1718
1719 #ifdef DEBUG_TIMING
1720 pa_log_debug("Loop");
1721 #endif
1722
1723 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested)) {
1724 if (process_rewind(u) < 0)
1725 goto fail;
1726 }
1727
1728 /* Render some data and write it to the dsp */
1729 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1730 int work_done;
1731 pa_usec_t sleep_usec = 0;
1732 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1733
1734 if (u->use_mmap)
1735 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1736 else
1737 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1738
1739 if (work_done < 0)
1740 goto fail;
1741
1742 /* pa_log_debug("work_done = %i", work_done); */
1743
1744 if (work_done) {
1745
1746 if (u->first) {
1747 pa_log_info("Starting playback.");
1748 snd_pcm_start(u->pcm_handle);
1749
1750 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1751
1752 u->first = FALSE;
1753 }
1754
1755 update_smoother(u);
1756 }
1757
1758 if (u->use_tsched) {
1759 pa_usec_t cusec;
1760
1761 if (u->since_start <= u->hwbuf_size) {
1762
1763 /* USB devices on ALSA seem to hit a buffer
1764 * underrun during the first iterations much
1765 * quicker than we calculate here, probably due to
1766 * the transport latency. To compensate for that
1767 * we artificially decrease the sleep time until
1768 * we have filled the buffer at least once
1769 * completely. */
1770
1771 if (pa_log_ratelimit(PA_LOG_DEBUG))
1772 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1773 sleep_usec /= 2;
1774 }
1775
1776 /* OK, the playback buffer is now full, let's
1777 * calculate when to wake up next */
1778 #ifdef DEBUG_TIMING
1779 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC);
1780 #endif
1781
1782 /* Convert from the sound card time domain to the
1783 * system time domain */
1784 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1785
1786 #ifdef DEBUG_TIMING
1787 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC);
1788 #endif
1789
1790 /* We don't trust the conversion, so we wake up whatever comes first */
1791 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1792 }
1793
1794 u->after_rewind = FALSE;
1795
1796 }
1797
1798 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1799 pa_usec_t volume_sleep;
1800 pa_sink_volume_change_apply(u->sink, &volume_sleep);
1801 if (volume_sleep > 0) {
1802 if (rtpoll_sleep > 0)
1803 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1804 else
1805 rtpoll_sleep = volume_sleep;
1806 }
1807 }
1808
1809 if (rtpoll_sleep > 0) {
1810 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1811 real_sleep = pa_rtclock_now();
1812 }
1813 else
1814 pa_rtpoll_set_timer_disabled(u->rtpoll);
1815
1816 /* Hmm, nothing to do. Let's sleep */
1817 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1818 goto fail;
1819
1820 if (rtpoll_sleep > 0) {
1821 real_sleep = pa_rtclock_now() - real_sleep;
1822 #ifdef DEBUG_TIMING
1823 pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
1824 (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
1825 (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
1826 #endif
1827 if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
1828 pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
1829 (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
1830 (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
1831 }
1832
1833 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
1834 pa_sink_volume_change_apply(u->sink, NULL);
1835
1836 if (ret == 0)
1837 goto finish;
1838
1839         /* Hand the poll results over to ALSA and process its response */
1840 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1841 struct pollfd *pollfd;
1842 int err;
1843 unsigned n;
1844
1845 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1846
1847 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1848 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1849 goto fail;
1850 }
1851
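            /* Anything other than POLLOUT on the PCM poll descriptors
             * signals an error condition (e.g. an underrun or a suspend);
             * try to recover and restart playback from the beginning. */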
1852 if (revents & ~POLLOUT) {
1853 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1854 goto fail;
1855
1856 u->first = TRUE;
1857 u->since_start = 0;
1858 revents = 0;
1859 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1860 pa_log_debug("Wakeup from ALSA!");
1861
1862 } else
1863 revents = 0;
1864 }
1865
1866 fail:
1867     /* If this was not a regular exit from the loop we have to continue
1868      * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1869 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1870 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1871
1872 finish:
1873 pa_log_debug("Thread shutting down");
1874 }
1875
1876 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1877 const char *n;
1878 char *t;
1879
1880 pa_assert(data);
1881 pa_assert(ma);
1882 pa_assert(device_name);
1883
1884 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1885 pa_sink_new_data_set_name(data, n);
1886 data->namereg_fail = TRUE;
1887 return;
1888 }
1889
1890 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1891 data->namereg_fail = TRUE;
1892 else {
1893 n = device_id ? device_id : device_name;
1894 data->namereg_fail = FALSE;
1895 }
1896
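    /* Illustrative example (hypothetical names): with n set to
     * "pci-0000_00_1b.0" and a mapping named "analog-stereo", the sink
     * ends up named "alsa_output.pci-0000_00_1b.0.analog-stereo". */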
1897 if (mapping)
1898 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1899 else
1900 t = pa_sprintf_malloc("alsa_output.%s", n);
1901
1902 pa_sink_new_data_set_name(data, t);
1903 pa_xfree(t);
1904 }
1905
1906 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1907 snd_hctl_t *hctl;
1908
1909 if (!mapping && !element)
1910 return;
1911
1912 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
1913 pa_log_info("Failed to find a working mixer device.");
1914 return;
1915 }
1916
1917 if (element) {
1918
1919 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1920 goto fail;
1921
1922 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
1923 goto fail;
1924
1925 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1926 pa_alsa_path_dump(u->mixer_path);
1927 } else if (!(u->mixer_path_set = mapping->output_path_set))
1928 goto fail;
1929
1930 return;
1931
1932 fail:
1933
1934 if (u->mixer_path) {
1935 pa_alsa_path_free(u->mixer_path);
1936 u->mixer_path = NULL;
1937 }
1938
1939 if (u->mixer_handle) {
1940 snd_mixer_close(u->mixer_handle);
1941 u->mixer_handle = NULL;
1942 }
1943 }
1944
1945 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1946 pa_bool_t need_mixer_callback = FALSE;
1947
1948 pa_assert(u);
1949
1950 if (!u->mixer_handle)
1951 return 0;
1952
1953 if (u->sink->active_port) {
1954 pa_alsa_port_data *data;
1955
1956 /* We have a list of supported paths, so let's activate the
1957 * one that has been chosen as active */
1958
1959 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1960 u->mixer_path = data->path;
1961
1962 pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->sink->muted);
1963
1964 } else {
1965
1966 if (!u->mixer_path && u->mixer_path_set)
1967 u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1968
1969 if (u->mixer_path) {
1970             /* Hmm, we have only a single path, so let's activate it */
1971
1972 pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->sink->muted);
1973
1974 } else
1975 return 0;
1976 }
1977
1978 mixer_volume_init(u);
1979
1980 /* Will we need to register callbacks? */
1981 if (u->mixer_path_set && u->mixer_path_set->paths) {
1982 pa_alsa_path *p;
1983 void *state;
1984
1985 PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1986 if (p->has_volume || p->has_mute)
1987 need_mixer_callback = TRUE;
1988 }
1989 }
1990 else if (u->mixer_path)
1991 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1992
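    /* Two monitoring schemes: with deferred volume the mixer is polled from
     * the IO thread's rtpoll, keeping volume writes in sync with playback;
     * otherwise mixer events are watched from the main loop via an fd list. */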
1993 if (need_mixer_callback) {
1994 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1995 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1996 u->mixer_pd = pa_alsa_mixer_pdata_new();
1997 mixer_callback = io_mixer_callback;
1998
1999 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
2000 pa_log("Failed to initialize file descriptor monitoring");
2001 return -1;
2002 }
2003 } else {
2004 u->mixer_fdl = pa_alsa_fdlist_new();
2005 mixer_callback = ctl_mixer_callback;
2006
2007 if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
2008 pa_log("Failed to initialize file descriptor monitoring");
2009 return -1;
2010 }
2011 }
2012
2013 if (u->mixer_path_set)
2014 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
2015 else
2016 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
2017 }
2018
2019 return 0;
2020 }
2021
2022 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
2023
2024 struct userdata *u = NULL;
2025 const char *dev_id = NULL, *key, *mod_name;
2026 pa_sample_spec ss;
2027 char *thread_name = NULL;
2028 uint32_t alternate_sample_rate;
2029 pa_channel_map map;
2030 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
2031 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
2032 size_t frame_size;
2033 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, set_formats = FALSE, fixed_latency_range = FALSE;
2034 pa_sink_new_data data;
2035 pa_alsa_profile_set *profile_set = NULL;
2036 void *state = NULL;
2037
2038 pa_assert(m);
2039 pa_assert(ma);
2040
2041 ss = m->core->default_sample_spec;
2042 map = m->core->default_channel_map;
2043 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
2044 pa_log("Failed to parse sample specification and channel map");
2045 goto fail;
2046 }
2047
2048 alternate_sample_rate = m->core->alternate_sample_rate;
2049 if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
2050 pa_log("Failed to parse alternate sample rate");
2051 goto fail;
2052 }
2053
2054 frame_size = pa_frame_size(&ss);
2055
2056 nfrags = m->core->default_n_fragments;
2057 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
2058     if (frag_size == 0)
2059 frag_size = (uint32_t) frame_size;
2060 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
2061 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
2062
2063 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
2064 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
2065 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
2066 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
2067 pa_log("Failed to parse buffer metrics");
2068 goto fail;
2069 }
2070
2071 buffer_size = nfrags * frag_size;
2072
2073 period_frames = frag_size/frame_size;
2074 buffer_frames = buffer_size/frame_size;
2075 tsched_frames = tsched_size/frame_size;
2076
2077 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
2078 pa_log("Failed to parse mmap argument.");
2079 goto fail;
2080 }
2081
2082 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
2083 pa_log("Failed to parse tsched argument.");
2084 goto fail;
2085 }
2086
2087 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
2088 pa_log("Failed to parse ignore_dB argument.");
2089 goto fail;
2090 }
2091
2092 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
2093 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
2094 pa_log("Failed to parse rewind_safeguard argument");
2095 goto fail;
2096 }
2097
2098 deferred_volume = m->core->deferred_volume;
2099 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
2100 pa_log("Failed to parse deferred_volume argument.");
2101 goto fail;
2102 }
2103
2104 if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
2105 pa_log("Failed to parse fixed_latency_range argument.");
2106 goto fail;
2107 }
2108
2109 use_tsched = pa_alsa_may_tsched(use_tsched);
2110
2111 u = pa_xnew0(struct userdata, 1);
2112 u->core = m->core;
2113 u->module = m;
2114 u->use_mmap = use_mmap;
2115 u->use_tsched = use_tsched;
2116 u->deferred_volume = deferred_volume;
2117 u->fixed_latency_range = fixed_latency_range;
2118 u->first = TRUE;
2119 u->rewind_safeguard = rewind_safeguard;
2120 u->rtpoll = pa_rtpoll_new();
2121 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
2122
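    /* Set up the smoother that reconciles the sound card clock with the
     * system clock. Assuming the parameter order of pa_smoother_new() in
     * pulsecore/time-smoother.h, these arguments are: adjust time, history
     * window, monotonic, smoothing, minimum history entries, time offset,
     * and start-paused (it is resumed once playback actually starts). */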
2123 u->smoother = pa_smoother_new(
2124 SMOOTHER_ADJUST_USEC,
2125 SMOOTHER_WINDOW_USEC,
2126 TRUE,
2127 TRUE,
2128 5,
2129 pa_rtclock_now(),
2130 TRUE);
2131 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
2132
2133     /* Use the mapping's UCM context, if it has one */
2134 if (mapping && mapping->ucm_context.ucm)
2135 u->ucm_context = &mapping->ucm_context;
2136
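    /* Device selection: an explicit device_id= argument wins, then
     * device=, and finally the compiled-in DEFAULT_DEVICE ("default"). */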
2137 dev_id = pa_modargs_get_value(
2138 ma, "device_id",
2139 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
2140
2141 u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
2142
2143 if (reserve_init(u, dev_id) < 0)
2144 goto fail;
2145
2146 if (reserve_monitor_init(u, dev_id) < 0)
2147 goto fail;
2148
2149 b = use_mmap;
2150 d = use_tsched;
2151
2152 if (mapping) {
2153
2154 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2155 pa_log("device_id= not set");
2156 goto fail;
2157 }
2158
2159 if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
2160 if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
2161 pa_log("Failed to enable ucm modifier %s", mod_name);
2162 else
2163 pa_log_debug("Enabled ucm modifier %s", mod_name);
2164 }
2165
2166 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
2167 dev_id,
2168 &u->device_name,
2169 &ss, &map,
2170 SND_PCM_STREAM_PLAYBACK,
2171 &period_frames, &buffer_frames, tsched_frames,
2172 &b, &d, mapping)))
2173 goto fail;
2174
2175 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2176
2177 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
2178 goto fail;
2179
2180 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
2181 dev_id,
2182 &u->device_name,
2183 &ss, &map,
2184 SND_PCM_STREAM_PLAYBACK,
2185 &period_frames, &buffer_frames, tsched_frames,
2186 &b, &d, profile_set, &mapping)))
2187 goto fail;
2188
2189 } else {
2190
2191 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
2192 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
2193 &u->device_name,
2194 &ss, &map,
2195 SND_PCM_STREAM_PLAYBACK,
2196 &period_frames, &buffer_frames, tsched_frames,
2197 &b, &d, FALSE)))
2198 goto fail;
2199 }
2200
2201 pa_assert(u->device_name);
2202 pa_log_info("Successfully opened device %s.", u->device_name);
2203
2204 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2205         pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
2206 goto fail;
2207 }
2208
2209 if (mapping)
2210 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
2211
2212 if (use_mmap && !b) {
2213 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2214 u->use_mmap = use_mmap = FALSE;
2215 }
2216
2217 if (use_tsched && (!b || !d)) {
2218 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2219 u->use_tsched = use_tsched = FALSE;
2220 }
2221
2222 if (u->use_mmap)
2223 pa_log_info("Successfully enabled mmap() mode.");
2224
2225 if (u->use_tsched) {
2226 pa_log_info("Successfully enabled timer-based scheduling mode.");
2227
2228 if (u->fixed_latency_range)
2229 pa_log_info("Disabling latency range changes on underrun");
2230 }
2231
2232 if (is_iec958(u) || is_hdmi(u))
2233 set_formats = TRUE;
2234
2235 u->rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
2236 if (!u->rates) {
2237 pa_log_error("Failed to find any supported sample rates.");
2238 goto fail;
2239 }
2240
2241 /* ALSA might tweak the sample spec, so recalculate the frame size */
2242 frame_size = pa_frame_size(&ss);
2243
2244 if (!u->ucm_context)
2245 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2246
2247 pa_sink_new_data_init(&data);
2248 data.driver = driver;
2249 data.module = m;
2250 data.card = card;
2251 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
2252
2253     /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2254      * variable instead of using &data.namereg_fail directly, because
2255      * data.namereg_fail is a bit-field, and C does not allow taking the
2256      * address of a bit-field member. */
2257 namereg_fail = data.namereg_fail;
2258 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2259 pa_log("Failed to parse namereg_fail argument.");
2260 pa_sink_new_data_done(&data);
2261 goto fail;
2262 }
2263 data.namereg_fail = namereg_fail;
2264
2265 pa_sink_new_data_set_sample_spec(&data, &ss);
2266 pa_sink_new_data_set_channel_map(&data, &map);
2267 pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
2268
2269 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2270 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2271 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2272 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2273 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2274
2275 if (mapping) {
2276 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2277 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2278
2279 while ((key = pa_proplist_iterate(mapping->proplist, &state)))
2280 pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
2281 }
2282
2283 pa_alsa_init_description(data.proplist);
2284
2285 if (u->control_device)
2286 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2287
2288 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2289 pa_log("Invalid properties");
2290 pa_sink_new_data_done(&data);
2291 goto fail;
2292 }
2293
2294 if (u->ucm_context)
2295 pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, TRUE, card);
2296 else if (u->mixer_path_set)
2297 pa_alsa_add_ports(&data, u->mixer_path_set, card);
2298
2299 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
2300 (set_formats ? PA_SINK_SET_FORMATS : 0));
2301 pa_sink_new_data_done(&data);
2302
2303 if (!u->sink) {
2304 pa_log("Failed to create sink object");
2305 goto fail;
2306 }
2307
2308 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2309 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2310 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2311 goto fail;
2312 }
2313
2314 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2315 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2316 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2317 goto fail;
2318 }
2319
2320 u->sink->parent.process_msg = sink_process_msg;
2321 if (u->use_tsched)
2322 u->sink->update_requested_latency = sink_update_requested_latency_cb;
2323 u->sink->set_state = sink_set_state_cb;
2324 if (u->ucm_context)
2325 u->sink->set_port = sink_set_port_ucm_cb;
2326 else
2327 u->sink->set_port = sink_set_port_cb;
2328 if (u->sink->alternate_sample_rate)
2329 u->sink->update_rate = sink_update_rate_cb;
2330 u->sink->userdata = u;
2331
2332 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2333 pa_sink_set_rtpoll(u->sink, u->rtpoll);
2334
2335 u->frame_size = frame_size;
2336 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2337 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2338 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2339
2340 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2341 (double) u->hwbuf_size / (double) u->fragment_size,
2342 (long unsigned) u->fragment_size,
2343 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2344 (long unsigned) u->hwbuf_size,
2345 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
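    /* Worked example with illustrative values: for S16LE stereo at 44100Hz
     * the frame size is 4 bytes, so a 65536 byte hardware buffer holds
     * 16384 frames, i.e. about 371.5ms of audio. */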
2346
2347 pa_sink_set_max_request(u->sink, u->hwbuf_size);
2348 if (pa_alsa_pcm_is_hw(u->pcm_handle))
2349 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2350 else {
2351 pa_log_info("Disabling rewind for device %s", u->device_name);
2352 pa_sink_set_max_rewind(u->sink, 0);
2353 }
2354
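    /* With timer-based scheduling the latency is dynamic and can be traded
     * off at runtime via the watermark; without it, the latency is pinned
     * to the duration of the whole hardware buffer. */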
2355 if (u->use_tsched) {
2356 u->tsched_watermark_ref = tsched_watermark;
2357 reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
2358 } else
2359 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
2360
2361 reserve_update(u);
2362
2363 if (update_sw_params(u) < 0)
2364 goto fail;
2365
2366 if (u->ucm_context) {
2367 if (u->sink->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->sink->active_port, TRUE) < 0)
2368 goto fail;
2369 } else if (setup_mixer(u, ignore_dB) < 0)
2370 goto fail;
2371
2372 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2373
2374 thread_name = pa_sprintf_malloc("alsa-sink-%s", pa_strnull(pa_proplist_gets(u->sink->proplist, "alsa.id")));
2375 if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
2376 pa_log("Failed to create thread.");
2377 goto fail;
2378 }
2379 pa_xfree(thread_name);
2380 thread_name = NULL;
2381
2382 /* Get initial mixer settings */
2383 if (data.volume_is_set) {
2384 if (u->sink->set_volume)
2385 u->sink->set_volume(u->sink);
2386 } else {
2387 if (u->sink->get_volume)
2388 u->sink->get_volume(u->sink);
2389 }
2390
2391 if (data.muted_is_set) {
2392 if (u->sink->set_mute)
2393 u->sink->set_mute(u->sink);
2394 } else {
2395 if (u->sink->get_mute)
2396 u->sink->get_mute(u->sink);
2397 }
2398
2399 if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
2400 u->sink->write_volume(u->sink);
2401
2402 if (set_formats) {
2403 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2404 pa_format_info *format;
2405
2406 /* To start with, we only support PCM formats. Other formats may be added
2407              * with pa_sink_set_formats(). */
2408 format = pa_format_info_new();
2409 format->encoding = PA_ENCODING_PCM;
2410 u->formats = pa_idxset_new(NULL, NULL);
2411 pa_idxset_put(u->formats, format, NULL);
2412
2413 u->sink->get_formats = sink_get_formats;
2414 u->sink->set_formats = sink_set_formats;
2415 }
2416
2417 pa_sink_put(u->sink);
2418
2419 if (profile_set)
2420 pa_alsa_profile_set_free(profile_set);
2421
2422 return u->sink;
2423
2424 fail:
2425 pa_xfree(thread_name);
2426
2427 if (u)
2428 userdata_free(u);
2429
2430 if (profile_set)
2431 pa_alsa_profile_set_free(profile_set);
2432
2433 return NULL;
2434 }
2435
2436 static void userdata_free(struct userdata *u) {
2437 pa_assert(u);
2438
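    /* Teardown order matters: unlink the sink first so nothing new gets
     * routed to it, then shut down the IO thread, and only then free the
     * resources that thread may still have been touching. */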
2439 if (u->sink)
2440 pa_sink_unlink(u->sink);
2441
2442 if (u->thread) {
2443 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2444 pa_thread_free(u->thread);
2445 }
2446
2447 pa_thread_mq_done(&u->thread_mq);
2448
2449 if (u->sink)
2450 pa_sink_unref(u->sink);
2451
2452 if (u->memchunk.memblock)
2453 pa_memblock_unref(u->memchunk.memblock);
2454
2455 if (u->mixer_pd)
2456 pa_alsa_mixer_pdata_free(u->mixer_pd);
2457
2458 if (u->alsa_rtpoll_item)
2459 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2460
2461 if (u->rtpoll)
2462 pa_rtpoll_free(u->rtpoll);
2463
2464 if (u->pcm_handle) {
2465 snd_pcm_drop(u->pcm_handle);
2466 snd_pcm_close(u->pcm_handle);
2467 }
2468
2469 if (u->mixer_fdl)
2470 pa_alsa_fdlist_free(u->mixer_fdl);
2471
2472 if (u->mixer_path && !u->mixer_path_set)
2473 pa_alsa_path_free(u->mixer_path);
2474
2475 if (u->mixer_handle)
2476 snd_mixer_close(u->mixer_handle);
2477
2478 if (u->smoother)
2479 pa_smoother_free(u->smoother);
2480
2481 if (u->formats)
2482 pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);
2483
2484 if (u->rates)
2485 pa_xfree(u->rates);
2486
2487 reserve_done(u);
2488 monitor_done(u);
2489
2490 pa_xfree(u->device_name);
2491 pa_xfree(u->control_device);
2492 pa_xfree(u->paths_dir);
2493 pa_xfree(u);
2494 }
2495
2496 void pa_alsa_sink_free(pa_sink *s) {
2497 struct userdata *u;
2498
2499 pa_sink_assert_ref(s);
2500 pa_assert_se(u = s->userdata);
2501
2502 userdata_free(u);
2503 }