/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <signal.h>
#include <stdio.h>

#include <asoundlib.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/rtclock.h>
#include <pulse/timeval.h>
#include <pulse/volume.h>
#include <pulse/xmalloc.h>
#include <pulse/internal.h>

#include <pulsecore/core.h>
#include <pulsecore/i18n.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/time-smoother.h>

#include <modules/reserve-wrap.h>

#include "alsa-util.h"
#include "alsa-sink.h"

/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)              /* 2s -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)         /* 20ms -- Fill up when only this much is left in the buffer */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)        /* 10ms -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)         /* 5ms -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)     /* 20s -- How long after a dropout to recheck whether things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)    /* 0ms -- If the buffer level is ever below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC)  /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */

/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
 * will increase the watermark only if we hit a real underrun. */
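
/* For illustration, assuming a 44.1 kHz S16LE stereo stream (4 bytes per
 * frame): the default 2s buffer above holds 88200 frames (352800 bytes)
 * and the default 20ms watermark corresponds to 882 frames (3528 bytes),
 * i.e. we normally wake up again when roughly 1% of the buffer is left. */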

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                 /* 10ms -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                 /* 4ms -- Wake up at least this long before the buffer runs empty */

#define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC)                   /* 10s -- smoother window size */
#define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC)                    /* 1s -- smoother adjust time */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                  /* 2ms -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)                /* 200ms -- max smoother update interval */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
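
/* For illustration: PA_VOLUME_NORM is 0x10000 (65536), so VOLUME_ACCURACY
 * is 655, i.e. hardware volumes within roughly 1% of the requested value
 * are accepted without adding a software volume stage on top. */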

#define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
#define DEFAULT_REWIND_SAFEGUARD_USEC (1330)  /* 1.33ms; depending on channels/rate/sample format we may rewind more than the 256 bytes above */
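
/* Worked example, assuming S16LE stereo (4 bytes per frame): 256 bytes are
 * 64 frames, which at 48kHz last 64/48000 s = 1.33ms, matching the figure
 * quoted above. */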

struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;

    char *paths_dir;
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    unsigned int *rates;

    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        tsched_watermark_ref,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold,
        rewind_safeguard;

    pa_usec_t watermark_dec_not_before;
    pa_usec_t min_latency_ref;
    pa_usec_t tsched_watermark_usec;

    pa_memchunk memchunk;

    char *device_name;    /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    pa_smoother *smoother;
    uint64_t write_count;
    uint64_t since_start;
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    pa_idxset *formats;

    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;

    /* ucm context */
    pa_alsa_ucm_mapping_context *ucm_context;
};

static void userdata_free(struct userdata *u);

/* FIXME: Is there a better way to do this than device names? */
static pa_bool_t is_iec958(struct userdata *u) {
    return (strncmp("iec958", u->device_name, 6) == 0);
}

static pa_bool_t is_hdmi(struct userdata *u) {
    return (strncmp("hdmi", u->device_name, 4) == 0);
}

static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
    pa_assert(r);
    pa_assert(u);

    pa_log_debug("Suspending sink %s, because another application requested us to release the device.", u->sink->name);

    if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
        return PA_HOOK_CANCEL;

    return PA_HOOK_OK;
}

static void reserve_done(struct userdata *u) {
    pa_assert(u);

    if (u->reserve_slot) {
        pa_hook_slot_free(u->reserve_slot);
        u->reserve_slot = NULL;
    }

    if (u->reserve) {
        pa_reserve_wrapper_unref(u->reserve);
        u->reserve = NULL;
    }
}

static void reserve_update(struct userdata *u) {
    const char *description;
    pa_assert(u);

    if (!u->sink || !u->reserve)
        return;

    if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
        pa_reserve_wrapper_set_application_device_name(u->reserve, description);
}

static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}

static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
    pa_assert(w);
    pa_assert(u);

    if (PA_PTR_TO_UINT(busy) && !u->reserve) {
        pa_log_debug("Suspending sink %s, because another application is blocking the access to the device.", u->sink->name);
        pa_sink_suspend(u->sink, true, PA_SUSPEND_APPLICATION);
    } else {
        pa_log_debug("Resuming sink %s, because other applications aren't blocking access to the device any more.", u->sink->name);
        pa_sink_suspend(u->sink, false, PA_SUSPEND_APPLICATION);
    }

    return PA_HOOK_OK;
}

static void monitor_done(struct userdata *u) {
    pa_assert(u);

    if (u->monitor_slot) {
        pa_hook_slot_free(u->monitor_slot);
        u->monitor_slot = NULL;
    }

    if (u->monitor) {
        pa_reserve_monitor_wrapper_unref(u->monitor);
        u->monitor = NULL;
    }
}

static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}

static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}

static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;

    u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
}

static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's
       raise the latency, unless doing so was disabled in
       configuration */
    if (u->fixed_latency_range)
        return;

    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
    }

    /* When we reach this point we're officially fucked! */
}
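
/* For illustration: PA_MIN(w*2, w + inc_step) doubles very small
 * watermarks but otherwise grows them by one increment, e.g. a 20ms
 * watermark with the default 10ms step becomes MIN(40ms, 30ms) = 30ms,
 * which fix_tsched_watermark() then clamps to the usable buffer range. */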

static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    if (u->watermark_dec_not_before <= 0)
        goto restart;

    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);

    /* We don't change the latency range */

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}
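
/* For illustration: the decrease is the mirror image and shrinks by at
 * most one step at a time, e.g. a 30ms watermark with the default 5ms
 * step becomes MAX(15ms, 25ms) = 25ms, and the next decrease is only
 * permitted after another 20s verification window has passed. */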

static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = u->tsched_watermark_usec;

    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
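
/* Worked example: with a requested latency of 2s and a 20ms watermark
 * this yields *sleep_usec = 1.98s and *process_usec = 20ms; if the
 * requested latency is smaller than the watermark, say 10ms, the
 * watermark is cut down to usec/2 and both values end up at 5ms. */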

static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if (err == -ESTRPIPE)
        pa_log_debug("%s: System suspended!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, pa_alsa_strerror(err));
        return -1;
    }

    u->first = TRUE;
    u->since_start = 0;
    return 0;
}

static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_play;
    pa_bool_t underrun = FALSE;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = TRUE;

#if 0
        PA_DEBUG_TRAP;
#endif

        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit(PA_LOG_INFO))
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = FALSE;

                /* We decrease the watermark only if we have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}
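
/* Summary, assuming the default thresholds: with
 * watermark_inc_threshold = 0 the watermark grows only on a real
 * underrun, and with watermark_dec_threshold = 100ms it shrinks only if
 * a timer wakeup finds more than 100ms left to play and the level never
 * dipped below that during the whole 20s verification window. */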

static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play, input_underrun;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;
            size_t written;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            written = frames * u->frame_size;
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, written, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if (!after_avail && (int) sframes == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->write_count += written;
            u->since_start += written;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) written, (unsigned long) n_bytes);
#endif

            if (written >= n_bytes)
                break;

            n_bytes -= written;
        }
    }

    input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);

    if (u->use_tsched) {
        pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);

        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;

        *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play, input_underrun;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;
            size_t written;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            written = frames * u->frame_size;
            u->memchunk.index += written;
            u->memchunk.length -= written;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += written;
            u->since_start += written;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if (written >= n_bytes)
                break;

            n_bytes -= written;
        }
    }

    input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);

    if (u->use_tsched) {
        pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);

        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;

        *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;
    snd_htimestamp_t htstamp = { 0, 0 };

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    snd_pcm_status_get_htstamp(status, &htstamp);
    now1 = pa_timespec_load(&htstamp);

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
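
/* For illustration: starting from SMOOTHER_MIN_INTERVAL the accepted
 * update interval doubles as 2, 4, 8, ..., 128ms and is then capped at
 * SMOOTHER_MAX_INTERVAL, so a stable stream quickly converges to one
 * smoother update every 200ms. */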

static pa_usec_t sink_get_latency(struct userdata *u) {
    pa_usec_t r;
    int64_t delay;
    pa_usec_t now1, now2;

    pa_assert(u);

    now1 = pa_rtclock_now();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;

    r = delay >= 0 ? (pa_usec_t) delay : 0;

    if (u->memchunk.memblock)
        r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);

    return r;
}
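
/* Worked example: the latency is the gap between what we have written
 * and what the smoother estimates has been played, e.g. 500ms written
 * vs. 480ms played yields 20ms, plus whatever is still queued up in
 * u->memchunk when the non-mmap code path is used. */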

static int build_pollfd(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
        return -1;

    return 0;
}

/* Called from IO context */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    /* We reset max_rewind/max_request here to make sure that while we
     * are suspended the old max_request/max_rewind values set before
     * the suspend can influence the per-stream buffer of newly
     * created streams, without their requirements having any
     * influence on them. */
    pa_sink_set_max_rewind_within_thread(u->sink, 0);
    pa_sink_set_max_request_within_thread(u->sink, 0);

    pa_log_info("Device suspended...");

    return 0;
}

/* Called from IO context */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
        pa_sink_set_max_rewind_within_thread(u->sink, 0);
    }

    return 0;
}

/* Called from IO context on unsuspend or from main thread when creating sink */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            pa_bool_t in_thread) {
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
                                                    &u->sink->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_sink_set_latency_range_within_thread(u->sink,
                                                u->min_latency_ref,
                                                pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work-around assert in pa_sink_set_latency_range_within_thread,
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->sink->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
}

/* Called from IO context */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;
    char *device_name = NULL;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
        /* Need to open device in NONAUDIO mode */
        int len = strlen(u->device_name) + 8;

        device_name = pa_xmalloc(len);
        pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
    }

    if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;
    u->since_start = 0;

    /* reset the watermark to the value defined when sink was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, TRUE);

    pa_log_info("Resumed successfully...");

    pa_xfree(device_name);
    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    pa_xfree(device_name);

    return -PA_ERR_IO;
}

/* Called from IO context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED: {
                    int r;

                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING: {
                    int r;

                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}

/* Called from main context */
static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
    pa_sink_state_t old_state;
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    old_state = pa_sink_get_state(u->sink);

    if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
        reserve_done(u);
    else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
        if (reserve_init(u, u->device_name) < 0)
            return -PA_ERR_BUSY;

    return 0;
}

static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (!PA_SINK_IS_LINKED(u->sink->state))
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
        pa_sink_set_mixer_dirty(u->sink, TRUE);
        return 0;
    }

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_sink_get_volume(u->sink, TRUE);
        pa_sink_get_mute(u->sink, TRUE);
    }

    return 0;
}

static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
        pa_sink_set_mixer_dirty(u->sink, TRUE);
        return 0;
    }

    if (mask & SND_CTL_EVENT_MASK_VALUE)
        pa_sink_update_volume_and_mute(u->sink);

    return 0;
}

static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

    if (u->mixer_path->has_dB) {
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        pa_log_debug("               in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
    }

    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_sink_set_soft_volume(s, NULL);
}

static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    pa_bool_t deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}

static void sink_write_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

1396 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug("                              in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}

static void sink_get_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_bool_t b;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
        return;

    s->muted = b;
}

static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}

static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        pa_sink_set_write_volume_callback(u->sink, NULL);
        pa_sink_set_get_volume_callback(u->sink, NULL);
        pa_sink_set_set_volume_callback(u->sink, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
        pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);

        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
            pa_log_info("Successfully enabled deferred volume.");
        } else
            pa_sink_set_write_volume_callback(u->sink, NULL);

        if (u->mixer_path->has_dB) {
            pa_sink_enable_decibel_volume(u->sink, TRUE);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
        } else {
            pa_sink_enable_decibel_volume(u->sink, FALSE);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_sink_set_get_mute_callback(u->sink, NULL);
        pa_sink_set_set_mute_callback(u->sink, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
        pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}

static int sink_set_port_ucm_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->ucm_context);

    return pa_alsa_ucm_set_port(u->ucm_context, p, TRUE);
}

static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);

    mixer_volume_init(u);

    if (s->set_mute)
        s->set_mute(s);
    if (s->flags & PA_SINK_DEFERRED_VOLUME) {
        if (s->write_volume)
            s->write_volume(s);
    } else {
        if (s->set_volume)
            s->set_volume(s);
    }

    return 0;
}

static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    size_t before;
    pa_assert(u);
    pa_assert(u->use_tsched); /* only when timer scheduling is used
                               * can we dynamically adjust the
                               * latency */

    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
       buffer than before. If so, we need to make sure that subsequent
       rewinds are relative to the new maximum fill level and not to the
       current fill level. Thus, let's do a full rewind once, to clear
       things up. */

    if (u->hwbuf_unused > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}

static pa_idxset* sink_get_formats(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_idxset *ret = pa_idxset_new(NULL, NULL);
    pa_format_info *f;
    uint32_t idx;

    pa_assert(u);

    PA_IDXSET_FOREACH(f, u->formats, idx) {
        pa_idxset_put(ret, pa_format_info_copy(f), NULL);
    }

    return ret;
}

static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
    struct userdata *u = s->userdata;
    pa_format_info *f, *g;
    uint32_t idx, n;

    pa_assert(u);

    /* FIXME: also validate sample rates against what the device supports */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
            /* EAC3 cannot be sent over S/PDIF */
            return FALSE;
    }

    pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);
    u->formats = pa_idxset_new(NULL, NULL);

    /* Note: the logic below won't apply if we're using software encoding.
     * This is fine for now since we don't support that via the passthrough
     * framework, but this must be changed if we do. */

    /* Count how many sample rates we support */
    for (idx = 0, n = 0; u->rates[idx]; idx++)
        n++;

    /* First insert non-PCM formats since we prefer those. */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (!pa_format_info_is_pcm(f)) {
            g = pa_format_info_copy(f);
            pa_format_info_set_prop_int_array(g, PA_PROP_FORMAT_RATE, (int *) u->rates, n);
            pa_idxset_put(u->formats, g, NULL);
        }
    }

    /* Now add any PCM formats */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (pa_format_info_is_pcm(f)) {
            /* We don't set rates here since we'll just tack on a resampler for
             * unsupported rates */
            pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
        }
    }

    return TRUE;
}

static pa_bool_t sink_update_rate_cb(pa_sink *s, uint32_t rate) {
    struct userdata *u = s->userdata;
    int i;
    pa_bool_t supported = FALSE;

    pa_assert(u);

    for (i = 0; u->rates[i]; i++) {
        if (u->rates[i] == rate) {
            supported = TRUE;
            break;
        }
    }

    if (!supported) {
        pa_log_info("Sink does not support sample rate of %d Hz", rate);
        return FALSE;
    }

    if (!PA_SINK_IS_OPENED(s->state)) {
        pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
        u->sink->sample_spec.rate = rate;
        return TRUE;
    }

    return FALSE;
}

static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    if (!PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
        pa_sink_process_rewind(u->sink, 0);
        return 0;
    }

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
        return -1;
    }

    unused_nbytes = (size_t) unused * u->frame_size;

    /* make sure rewind doesn't go too far, can cause issues with DMAs */
    unused_nbytes += u->rewind_safeguard;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
            if (try_recover(u, "process_rewind", out_frames) < 0)
                return -1;
            out_frames = 0;
        }

        pa_log_debug("after: %lu", (unsigned long) out_frames);

        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= rewind_nbytes;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    pa_sink_process_rewind(u->sink, 0);
    return 0;
}
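
/* Worked example: the rewind limit is
 * hwbuf_size - (unused_nbytes + rewind_safeguard); with a 64 KiB buffer,
 * 16 KiB reported unused and the default 256 byte safeguard we may
 * rewind at most 65536 - (16384 + 256) = 48896 bytes. */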

static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0, real_sleep;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        if (PA_UNLIKELY(u->sink->thread_info.rewind_requested)) {
            if (process_rewind(u) < 0)
                goto fail;
        }

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

                    u->first = FALSE;
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To compensate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely. */
1768
1769 if (pa_log_ratelimit(PA_LOG_DEBUG))
1770 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1771 sleep_usec /= 2;
1772 }
1773
1774 /* OK, the playback buffer is now full, let's
1775 * calculate when to wake up next */
1776 #ifdef DEBUG_TIMING
1777 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC);
1778 #endif
1779
1780 /* Convert from the sound card time domain to the
1781 * system time domain */
1782 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1783
1784 #ifdef DEBUG_TIMING
1785 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC);
1786 #endif
1787
1788 /* We don't trust the conversion, so we wake up whatever comes first */
1789 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1790 }
1791
1792 u->after_rewind = FALSE;
1793
1794 }
1795
1796 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1797 pa_usec_t volume_sleep;
1798 pa_sink_volume_change_apply(u->sink, &volume_sleep);
1799 if (volume_sleep > 0) {
1800 if (rtpoll_sleep > 0)
1801 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1802 else
1803 rtpoll_sleep = volume_sleep;
1804 }
1805 }
1806
1807 if (rtpoll_sleep > 0) {
1808 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1809 real_sleep = pa_rtclock_now();
1810 }
1811 else
1812 pa_rtpoll_set_timer_disabled(u->rtpoll);
1813
1814 /* Hmm, nothing to do. Let's sleep */
1815 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1816 goto fail;
1817
1818 if (rtpoll_sleep > 0) {
1819 real_sleep = pa_rtclock_now() - real_sleep;
1820 #ifdef DEBUG_TIMING
1821 pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
1822 (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
1823 (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
1824 #endif
1825 if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
1826 pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
1827 (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
1828 (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
1829 }
1830
1831 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
1832 pa_sink_volume_change_apply(u->sink, NULL);
1833
1834 if (ret == 0)
1835 goto finish;
1836
1837 /* Tell ALSA about this and process its response */
1838 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1839 struct pollfd *pollfd;
1840 int err;
1841 unsigned n;
1842
1843 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1844
1845 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1846 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1847 goto fail;
1848 }
1849
1850 if (revents & ~POLLOUT) {
1851 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1852 goto fail;
1853
1854 u->first = TRUE;
1855 u->since_start = 0;
1856 revents = 0;
1857 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1858 pa_log_debug("Wakeup from ALSA!");
1859
1860 } else
1861 revents = 0;
1862 }
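    /* Disabled sketch restating the poll pattern from the loop above
     * with the steps annotated (it reuses the rtpoll item's pollfd
     * array, as the real code does): */
#if 0
    {
        struct pollfd *pfds;
        unsigned n;
        int err;

        /* The descriptors ALSA asked us to poll on... */
        pfds = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

        /* ...and the library call that folds their state back into a
         * single revents mask; anything besides POLLOUT means trouble
         * and triggers recovery. */
        if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pfds, n, &revents)) >= 0 && (revents & ~POLLOUT))
            pa_alsa_recover_from_poll(u->pcm_handle, revents);
    }
#endif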
1863
1864 fail:
1865     /* If this was not a regular exit from the loop we have to continue
1866      * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1867 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1868 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1869
1870 finish:
1871 pa_log_debug("Thread shutting down");
1872 }
1873
1874 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1875 const char *n;
1876 char *t;
1877
1878 pa_assert(data);
1879 pa_assert(ma);
1880 pa_assert(device_name);
1881
1882 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1883 pa_sink_new_data_set_name(data, n);
1884 data->namereg_fail = TRUE;
1885 return;
1886 }
1887
1888 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1889 data->namereg_fail = TRUE;
1890 else {
1891 n = device_id ? device_id : device_name;
1892 data->namereg_fail = FALSE;
1893 }
1894
1895 if (mapping)
1896 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1897 else
1898 t = pa_sprintf_malloc("alsa_output.%s", n);
1899
1900 pa_sink_new_data_set_name(data, t);
1901 pa_xfree(t);
1902 }
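/* For illustration (the device strings are made up): with device_id
 * "pci-0000_00_1b.0" and a mapping named "analog-stereo" the code above
 * yields the sink name "alsa_output.pci-0000_00_1b.0.analog-stereo";
 * without a mapping it yields "alsa_output.pci-0000_00_1b.0". */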
1903
1904 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1905 snd_hctl_t *hctl;
1906
1907 if (!mapping && !element)
1908 return;
1909
1910 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
1911 pa_log_info("Failed to find a working mixer device.");
1912 return;
1913 }
1914
1915 if (element) {
1916
1917 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1918 goto fail;
1919
1920 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
1921 goto fail;
1922
1923 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1924 pa_alsa_path_dump(u->mixer_path);
1925 } else if (!(u->mixer_path_set = mapping->output_path_set))
1926 goto fail;
1927
1928 return;
1929
1930 fail:
1931
1932 if (u->mixer_path) {
1933 pa_alsa_path_free(u->mixer_path);
1934 u->mixer_path = NULL;
1935 }
1936
1937 if (u->mixer_handle) {
1938 snd_mixer_close(u->mixer_handle);
1939 u->mixer_handle = NULL;
1940 }
1941 }
1942
1943 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1944 pa_bool_t need_mixer_callback = FALSE;
1945
1946 pa_assert(u);
1947
1948 if (!u->mixer_handle)
1949 return 0;
1950
1951 if (u->sink->active_port) {
1952 pa_alsa_port_data *data;
1953
1954 /* We have a list of supported paths, so let's activate the
1955 * one that has been chosen as active */
1956
1957 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1958 u->mixer_path = data->path;
1959
1960 pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->sink->muted);
1961
1962 } else {
1963
1964 if (!u->mixer_path && u->mixer_path_set)
1965 u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1966
1967 if (u->mixer_path) {
1968             /* Hmm, we have only a single path, so let's activate it */
1969
1970 pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->sink->muted);
1971
1972 } else
1973 return 0;
1974 }
1975
1976 mixer_volume_init(u);
1977
1978 /* Will we need to register callbacks? */
1979 if (u->mixer_path_set && u->mixer_path_set->paths) {
1980 pa_alsa_path *p;
1981 void *state;
1982
1983 PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1984 if (p->has_volume || p->has_mute)
1985 need_mixer_callback = TRUE;
1986 }
1987 }
1988 else if (u->mixer_path)
1989 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1990
1991 if (need_mixer_callback) {
1992 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1993 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1994 u->mixer_pd = pa_alsa_mixer_pdata_new();
1995 mixer_callback = io_mixer_callback;
1996
1997 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1998 pa_log("Failed to initialize file descriptor monitoring");
1999 return -1;
2000 }
2001 } else {
2002 u->mixer_fdl = pa_alsa_fdlist_new();
2003 mixer_callback = ctl_mixer_callback;
2004
2005 if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
2006 pa_log("Failed to initialize file descriptor monitoring");
2007 return -1;
2008 }
2009 }
2010
2011 if (u->mixer_path_set)
2012 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
2013 else
2014 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
2015 }
2016
2017 return 0;
2018 }
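/* Disabled sketch of the callback shape registered by setup_mixer();
 * io_mixer_callback and ctl_mixer_callback (defined earlier in this
 * file) both follow it: */
#if 0
static int example_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    /* Called when an element changes behind our back; `mask` carries
     * SND_CTL_EVENT_MASK_* bits describing what changed. */
    return 0;
}
#endif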
2019
2020 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
2021
2022 struct userdata *u = NULL;
2023 const char *dev_id = NULL, *key, *mod_name;
2024 pa_sample_spec ss;
2025 char *thread_name = NULL;
2026 uint32_t alternate_sample_rate;
2027 pa_channel_map map;
2028 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
2029 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
2030 size_t frame_size;
2031 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, set_formats = FALSE, fixed_latency_range = FALSE;
2032 pa_sink_new_data data;
2033 pa_alsa_profile_set *profile_set = NULL;
2034 void *state = NULL;
2035
2036 pa_assert(m);
2037 pa_assert(ma);
2038
2039 ss = m->core->default_sample_spec;
2040 map = m->core->default_channel_map;
2041 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
2042 pa_log("Failed to parse sample specification and channel map");
2043 goto fail;
2044 }
2045
2046 alternate_sample_rate = m->core->alternate_sample_rate;
2047 if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
2048 pa_log("Failed to parse alternate sample rate");
2049 goto fail;
2050 }
2051
2052 frame_size = pa_frame_size(&ss);
2053
2054 nfrags = m->core->default_n_fragments;
2055 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
2056 if (frag_size <= 0)
2057 frag_size = (uint32_t) frame_size;
2058 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
2059 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
2060
2061 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
2062 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
2063 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
2064 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
2065 pa_log("Failed to parse buffer metrics");
2066 goto fail;
2067 }
2068
2069 buffer_size = nfrags * frag_size;
2070
2071 period_frames = frag_size/frame_size;
2072 buffer_frames = buffer_size/frame_size;
2073 tsched_frames = tsched_size/frame_size;
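    /* Worked example with illustrative values: for S16LE stereo at
     * 44.1kHz, frame_size is 4 bytes; with e.g. 4 fragments of 25ms
     * each, frag_size comes out at roughly 4.4kB (about 1100 frames),
     * so buffer_size is roughly 17.6kB and buffer_frames about 4400,
     * i.e. around 100ms of audio. */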
2074
2075 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
2076 pa_log("Failed to parse mmap argument.");
2077 goto fail;
2078 }
2079
2080 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
2081 pa_log("Failed to parse tsched argument.");
2082 goto fail;
2083 }
2084
2085 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
2086 pa_log("Failed to parse ignore_dB argument.");
2087 goto fail;
2088 }
2089
2090 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
2091 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
2092 pa_log("Failed to parse rewind_safeguard argument");
2093 goto fail;
2094 }
2095
2096 deferred_volume = m->core->deferred_volume;
2097 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
2098 pa_log("Failed to parse deferred_volume argument.");
2099 goto fail;
2100 }
2101
2102 if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
2103 pa_log("Failed to parse fixed_latency_range argument.");
2104 goto fail;
2105 }
2106
2107 use_tsched = pa_alsa_may_tsched(use_tsched);
2108
2109 u = pa_xnew0(struct userdata, 1);
2110 u->core = m->core;
2111 u->module = m;
2112 u->use_mmap = use_mmap;
2113 u->use_tsched = use_tsched;
2114 u->deferred_volume = deferred_volume;
2115 u->fixed_latency_range = fixed_latency_range;
2116 u->first = TRUE;
2117 u->rewind_safeguard = rewind_safeguard;
2118 u->rtpoll = pa_rtpoll_new();
2119 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
2120
2121 u->smoother = pa_smoother_new(
2122 SMOOTHER_ADJUST_USEC,
2123 SMOOTHER_WINDOW_USEC,
2124 TRUE,
2125 TRUE,
2126 5,
2127 pa_rtclock_now(),
2128 TRUE);
2129 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
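    /* Disabled sketch of how this smoother is driven elsewhere in this
     * file: system time is the abscissa, sound card time the ordinate
     * (card_time_usec is a made-up stand-in variable): */
#if 0
    {
        pa_usec_t card_time_usec = 0; /* e.g. derived from the PCM delay */

        /* Feed a (system time, card time) sample... */
        pa_smoother_put(u->smoother, pa_rtclock_now(), card_time_usec);

        /* ...and later interpolate the card time for "now". */
        card_time_usec = pa_smoother_get(u->smoother, pa_rtclock_now());
    }
#endif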
2130
2131     /* Use the UCM context if the mapping provides one */
2132 if (mapping && mapping->ucm_context.ucm)
2133 u->ucm_context = &mapping->ucm_context;
2134
2135 dev_id = pa_modargs_get_value(
2136 ma, "device_id",
2137 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
2138
2139 u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
2140
2141 if (reserve_init(u, dev_id) < 0)
2142 goto fail;
2143
2144 if (reserve_monitor_init(u, dev_id) < 0)
2145 goto fail;
2146
2147 b = use_mmap;
2148 d = use_tsched;
2149
2150 if (mapping) {
2151
2152 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2153 pa_log("device_id= not set");
2154 goto fail;
2155 }
2156
2157 if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
2158 if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
2159 pa_log("Failed to enable ucm modifier %s", mod_name);
2160 else
2161 pa_log_debug("Enabled ucm modifier %s", mod_name);
2162 }
2163
2164 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
2165 dev_id,
2166 &u->device_name,
2167 &ss, &map,
2168 SND_PCM_STREAM_PLAYBACK,
2169 &period_frames, &buffer_frames, tsched_frames,
2170 &b, &d, mapping)))
2171 goto fail;
2172
2173 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2174
2175 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
2176 goto fail;
2177
2178 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
2179 dev_id,
2180 &u->device_name,
2181 &ss, &map,
2182 SND_PCM_STREAM_PLAYBACK,
2183 &period_frames, &buffer_frames, tsched_frames,
2184 &b, &d, profile_set, &mapping)))
2185 goto fail;
2186
2187 } else {
2188
2189 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
2190 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
2191 &u->device_name,
2192 &ss, &map,
2193 SND_PCM_STREAM_PLAYBACK,
2194 &period_frames, &buffer_frames, tsched_frames,
2195 &b, &d, FALSE)))
2196 goto fail;
2197 }
2198
2199 pa_assert(u->device_name);
2200 pa_log_info("Successfully opened device %s.", u->device_name);
2201
2202 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2203         pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
2204 goto fail;
2205 }
2206
2207 if (mapping)
2208 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
2209
2210 if (use_mmap && !b) {
2211 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2212 u->use_mmap = use_mmap = FALSE;
2213 }
2214
2215 if (use_tsched && (!b || !d)) {
2216 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2217 u->use_tsched = use_tsched = FALSE;
2218 }
2219
2220 if (u->use_mmap)
2221 pa_log_info("Successfully enabled mmap() mode.");
2222
2223 if (u->use_tsched) {
2224 pa_log_info("Successfully enabled timer-based scheduling mode.");
2225
2226 if (u->fixed_latency_range)
2227 pa_log_info("Disabling latency range changes on underrun");
2228 }
2229
2230 if (is_iec958(u) || is_hdmi(u))
2231 set_formats = TRUE;
2232
2233 u->rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
2234 if (!u->rates) {
2235 pa_log_error("Failed to find any supported sample rates.");
2236 goto fail;
2237 }
2238
2239 /* ALSA might tweak the sample spec, so recalculate the frame size */
2240 frame_size = pa_frame_size(&ss);
2241
2242 if (!u->ucm_context)
2243 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2244
2245 pa_sink_new_data_init(&data);
2246 data.driver = driver;
2247 data.module = m;
2248 data.card = card;
2249 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
2250
2251 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2252 * variable instead of using &data.namereg_fail directly, because
2253 * data.namereg_fail is a bitfield and taking the address of a bitfield
2254 * variable is impossible. */
2255 namereg_fail = data.namereg_fail;
2256 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2257 pa_log("Failed to parse namereg_fail argument.");
2258 pa_sink_new_data_done(&data);
2259 goto fail;
2260 }
2261 data.namereg_fail = namereg_fail;
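    /* Disabled sketch of the constraint the copy above works around: */
#if 0
    {
        pa_bool_t *p = &data.namereg_fail;  /* does not compile: namereg_fail is a bit-field */
        pa_bool_t copy = data.namereg_fail; /* fine: copy out, pass &copy, write back */
    }
#endif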
2262
2263 pa_sink_new_data_set_sample_spec(&data, &ss);
2264 pa_sink_new_data_set_channel_map(&data, &map);
2265 pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
2266
2267 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2268 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2269 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2270 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2271 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2272
2273 if (mapping) {
2274 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2275 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2276
2277 while ((key = pa_proplist_iterate(mapping->proplist, &state)))
2278 pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
2279 }
2280
2281 pa_alsa_init_description(data.proplist);
2282
2283 if (u->control_device)
2284 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2285
2286 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2287 pa_log("Invalid properties");
2288 pa_sink_new_data_done(&data);
2289 goto fail;
2290 }
2291
2292 if (u->ucm_context)
2293 pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, TRUE, card);
2294 else if (u->mixer_path_set)
2295 pa_alsa_add_ports(&data, u->mixer_path_set, card);
2296
2297 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
2298 (set_formats ? PA_SINK_SET_FORMATS : 0));
2299 pa_sink_new_data_done(&data);
2300
2301 if (!u->sink) {
2302 pa_log("Failed to create sink object");
2303 goto fail;
2304 }
2305
2306 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2307 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2308 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2309 goto fail;
2310 }
2311
2312 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2313 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2314 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2315 goto fail;
2316 }
2317
2318 u->sink->parent.process_msg = sink_process_msg;
2319 if (u->use_tsched)
2320 u->sink->update_requested_latency = sink_update_requested_latency_cb;
2321 u->sink->set_state = sink_set_state_cb;
2322 if (u->ucm_context)
2323 u->sink->set_port = sink_set_port_ucm_cb;
2324 else
2325 u->sink->set_port = sink_set_port_cb;
2326 if (u->sink->alternate_sample_rate)
2327 u->sink->update_rate = sink_update_rate_cb;
2328 u->sink->userdata = u;
2329
2330 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2331 pa_sink_set_rtpoll(u->sink, u->rtpoll);
2332
2333 u->frame_size = frame_size;
2334 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2335 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2336 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2337
2338 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2339 (double) u->hwbuf_size / (double) u->fragment_size,
2340 (long unsigned) u->fragment_size,
2341 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2342 (long unsigned) u->hwbuf_size,
2343 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2344
2345 pa_sink_set_max_request(u->sink, u->hwbuf_size);
2346 if (pa_alsa_pcm_is_hw(u->pcm_handle))
2347 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2348 else {
2349 pa_log_info("Disabling rewind for device %s", u->device_name);
2350 pa_sink_set_max_rewind(u->sink, 0);
2351 }
2352
2353 if (u->use_tsched) {
2354 u->tsched_watermark_ref = tsched_watermark;
2355 reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
2356 } else
2357 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
2358
2359 reserve_update(u);
2360
2361 if (update_sw_params(u) < 0)
2362 goto fail;
2363
2364 if (u->ucm_context) {
2365 if (u->sink->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->sink->active_port, TRUE) < 0)
2366 goto fail;
2367 } else if (setup_mixer(u, ignore_dB) < 0)
2368 goto fail;
2369
2370 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2371
2372 thread_name = pa_sprintf_malloc("alsa-sink-%s", pa_strnull(pa_proplist_gets(u->sink->proplist, "alsa.id")));
2373 if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
2374 pa_log("Failed to create thread.");
2375 goto fail;
2376 }
2377 pa_xfree(thread_name);
2378 thread_name = NULL;
2379
2380 /* Get initial mixer settings */
2381 if (data.volume_is_set) {
2382 if (u->sink->set_volume)
2383 u->sink->set_volume(u->sink);
2384 } else {
2385 if (u->sink->get_volume)
2386 u->sink->get_volume(u->sink);
2387 }
2388
2389 if (data.muted_is_set) {
2390 if (u->sink->set_mute)
2391 u->sink->set_mute(u->sink);
2392 } else {
2393 if (u->sink->get_mute)
2394 u->sink->get_mute(u->sink);
2395 }
2396
2397 if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
2398 u->sink->write_volume(u->sink);
2399
2400 if (set_formats) {
2401 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2402 pa_format_info *format;
2403
2404 /* To start with, we only support PCM formats. Other formats may be added
2405          * with pa_sink_set_formats(). */
2406 format = pa_format_info_new();
2407 format->encoding = PA_ENCODING_PCM;
2408 u->formats = pa_idxset_new(NULL, NULL);
2409 pa_idxset_put(u->formats, format, NULL);
2410
2411 u->sink->get_formats = sink_get_formats;
2412 u->sink->set_formats = sink_set_formats;
2413 }
2414
2415 pa_sink_put(u->sink);
2416
2417 if (profile_set)
2418 pa_alsa_profile_set_free(profile_set);
2419
2420 return u->sink;
2421
2422 fail:
2423 pa_xfree(thread_name);
2424
2425 if (u)
2426 userdata_free(u);
2427
2428 if (profile_set)
2429 pa_alsa_profile_set_free(profile_set);
2430
2431 return NULL;
2432 }
2433
2434 static void userdata_free(struct userdata *u) {
2435 pa_assert(u);
2436
2437 if (u->sink)
2438 pa_sink_unlink(u->sink);
2439
2440 if (u->thread) {
2441 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2442 pa_thread_free(u->thread);
2443 }
2444
2445 pa_thread_mq_done(&u->thread_mq);
2446
2447 if (u->sink)
2448 pa_sink_unref(u->sink);
2449
2450 if (u->memchunk.memblock)
2451 pa_memblock_unref(u->memchunk.memblock);
2452
2453 if (u->mixer_pd)
2454 pa_alsa_mixer_pdata_free(u->mixer_pd);
2455
2456 if (u->alsa_rtpoll_item)
2457 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2458
2459 if (u->rtpoll)
2460 pa_rtpoll_free(u->rtpoll);
2461
2462 if (u->pcm_handle) {
2463 snd_pcm_drop(u->pcm_handle);
2464 snd_pcm_close(u->pcm_handle);
2465 }
2466
2467 if (u->mixer_fdl)
2468 pa_alsa_fdlist_free(u->mixer_fdl);
2469
2470 if (u->mixer_path && !u->mixer_path_set)
2471 pa_alsa_path_free(u->mixer_path);
2472
2473 if (u->mixer_handle)
2474 snd_mixer_close(u->mixer_handle);
2475
2476 if (u->smoother)
2477 pa_smoother_free(u->smoother);
2478
2479 if (u->formats)
2480 pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);
2481
2482 if (u->rates)
2483 pa_xfree(u->rates);
2484
2485 reserve_done(u);
2486 monitor_done(u);
2487
2488 pa_xfree(u->device_name);
2489 pa_xfree(u->control_device);
2490 pa_xfree(u->paths_dir);
2491 pa_xfree(u);
2492 }
2493
2494 void pa_alsa_sink_free(pa_sink *s) {
2495 struct userdata *u;
2496
2497 pa_sink_assert_ref(s);
2498 pa_assert_se(u = s->userdata);
2499
2500 userdata_free(u);
2501 }