code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
sink, source: Always create a hashmap for ports.
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <signal.h>
28 #include <stdio.h>
29
30 #include <asoundlib.h>
31
32 #ifdef HAVE_VALGRIND_MEMCHECK_H
33 #include <valgrind/memcheck.h>
34 #endif
35
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/volume.h>
39 #include <pulse/xmalloc.h>
40 #include <pulse/internal.h>
41
42 #include <pulsecore/core.h>
43 #include <pulsecore/i18n.h>
44 #include <pulsecore/module.h>
45 #include <pulsecore/memchunk.h>
46 #include <pulsecore/sink.h>
47 #include <pulsecore/modargs.h>
48 #include <pulsecore/core-rtclock.h>
49 #include <pulsecore/core-util.h>
50 #include <pulsecore/sample-util.h>
51 #include <pulsecore/log.h>
52 #include <pulsecore/macro.h>
53 #include <pulsecore/thread.h>
54 #include <pulsecore/thread-mq.h>
55 #include <pulsecore/rtpoll.h>
56 #include <pulsecore/time-smoother.h>
57
58 #include <modules/reserve-wrap.h>
59
60 #include "alsa-util.h"
61 #include "alsa-sink.h"
62
63 /* #define DEBUG_TIMING */
64
65 #define DEFAULT_DEVICE "default"
66
67 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
68 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
69
70 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
71 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
72 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
73 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever below this threshold, increase the watermark */
74 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
75
76 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
77 * will increase the watermark only if we hit a real underrun. */
78
79 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
80 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
81
82 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother windows size */
83 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
84
85 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
86 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
87
88 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
89
90 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
91 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
92
/* Per-sink instance state, shared between the main thread and the IO thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* IO thread machinery */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    /* ALSA PCM handle; NULL while the device is suspended (see suspend()) */
    snd_pcm_t *pcm_handle;

    /* Mixer / hardware volume handling */
    char *paths_dir;
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    unsigned int *rates;  /* presumably the list of supported sample rates -- confirm against module setup code */

    /* All of the following sizes/levels are in bytes of the sink's sample spec. */
    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        tsched_watermark_ref,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold,
        rewind_safeguard;

    /* rtclock time before which the watermark must not be decreased; 0 = verification timer not armed */
    pa_usec_t watermark_dec_not_before;
    /* min latency remembered from the main thread for reuse in IO context (see reset_watermark()) */
    pa_usec_t min_latency_ref;

    /* Staging chunk for the non-mmap (snd_pcm_writei) write path */
    pa_memchunk memchunk;

    char *device_name;  /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    /* Time smoother state for latency interpolation */
    pa_smoother *smoother;
    uint64_t write_count;     /* total bytes handed to ALSA since resume */
    uint64_t since_start;     /* bytes written since playback (re)started */
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    pa_idxset *formats;

    /* Device reservation: giving the device up to / taking it from other applications */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
157
158 static void userdata_free(struct userdata *u);
159
160 /* FIXME: Is there a better way to do this than device names? */
161 static pa_bool_t is_iec958(struct userdata *u) {
162 return (strncmp("iec958", u->device_name, 6) == 0);
163 }
164
165 static pa_bool_t is_hdmi(struct userdata *u) {
166 return (strncmp("hdmi", u->device_name, 4) == 0);
167 }
168
169 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
170 pa_assert(r);
171 pa_assert(u);
172
173 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
174 return PA_HOOK_CANCEL;
175
176 return PA_HOOK_OK;
177 }
178
179 static void reserve_done(struct userdata *u) {
180 pa_assert(u);
181
182 if (u->reserve_slot) {
183 pa_hook_slot_free(u->reserve_slot);
184 u->reserve_slot = NULL;
185 }
186
187 if (u->reserve) {
188 pa_reserve_wrapper_unref(u->reserve);
189 u->reserve = NULL;
190 }
191 }
192
193 static void reserve_update(struct userdata *u) {
194 const char *description;
195 pa_assert(u);
196
197 if (!u->sink || !u->reserve)
198 return;
199
200 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
201 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
202 }
203
204 static int reserve_init(struct userdata *u, const char *dname) {
205 char *rname;
206
207 pa_assert(u);
208 pa_assert(dname);
209
210 if (u->reserve)
211 return 0;
212
213 if (pa_in_system_mode())
214 return 0;
215
216 if (!(rname = pa_alsa_get_reserve_name(dname)))
217 return 0;
218
219 /* We are resuming, try to lock the device */
220 u->reserve = pa_reserve_wrapper_get(u->core, rname);
221 pa_xfree(rname);
222
223 if (!(u->reserve))
224 return -1;
225
226 reserve_update(u);
227
228 pa_assert(!u->reserve_slot);
229 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
230
231 return 0;
232 }
233
234 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
235 pa_bool_t b;
236
237 pa_assert(w);
238 pa_assert(u);
239
240 b = PA_PTR_TO_UINT(busy) && !u->reserve;
241
242 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
243 return PA_HOOK_OK;
244 }
245
246 static void monitor_done(struct userdata *u) {
247 pa_assert(u);
248
249 if (u->monitor_slot) {
250 pa_hook_slot_free(u->monitor_slot);
251 u->monitor_slot = NULL;
252 }
253
254 if (u->monitor) {
255 pa_reserve_monitor_wrapper_unref(u->monitor);
256 u->monitor = NULL;
257 }
258 }
259
260 static int reserve_monitor_init(struct userdata *u, const char *dname) {
261 char *rname;
262
263 pa_assert(u);
264 pa_assert(dname);
265
266 if (pa_in_system_mode())
267 return 0;
268
269 if (!(rname = pa_alsa_get_reserve_name(dname)))
270 return 0;
271
272 /* We are resuming, try to lock the device */
273 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
274 pa_xfree(rname);
275
276 if (!(u->monitor))
277 return -1;
278
279 pa_assert(!u->monitor_slot);
280 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
281
282 return 0;
283 }
284
285 static void fix_min_sleep_wakeup(struct userdata *u) {
286 size_t max_use, max_use_2;
287
288 pa_assert(u);
289 pa_assert(u->use_tsched);
290
291 max_use = u->hwbuf_size - u->hwbuf_unused;
292 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
293
294 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
295 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
296
297 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
298 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
299 }
300
301 static void fix_tsched_watermark(struct userdata *u) {
302 size_t max_use;
303 pa_assert(u);
304 pa_assert(u->use_tsched);
305
306 max_use = u->hwbuf_size - u->hwbuf_unused;
307
308 if (u->tsched_watermark > max_use - u->min_sleep)
309 u->tsched_watermark = max_use - u->min_sleep;
310
311 if (u->tsched_watermark < u->min_wakeup)
312 u->tsched_watermark = u->min_wakeup;
313 }
314
315 static void increase_watermark(struct userdata *u) {
316 size_t old_watermark;
317 pa_usec_t old_min_latency, new_min_latency;
318
319 pa_assert(u);
320 pa_assert(u->use_tsched);
321
322 /* First, just try to increase the watermark */
323 old_watermark = u->tsched_watermark;
324 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
325 fix_tsched_watermark(u);
326
327 if (old_watermark != u->tsched_watermark) {
328 pa_log_info("Increasing wakeup watermark to %0.2f ms",
329 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
330 return;
331 }
332
333 /* Hmm, we cannot increase the watermark any further, hence let's
334 raise the latency, unless doing so was disabled in
335 configuration */
336 if (u->fixed_latency_range)
337 return;
338
339 old_min_latency = u->sink->thread_info.min_latency;
340 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
341 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
342
343 if (old_min_latency != new_min_latency) {
344 pa_log_info("Increasing minimal latency to %0.2f ms",
345 (double) new_min_latency / PA_USEC_PER_MSEC);
346
347 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
348 }
349
350 /* When we reach this we're officialy fucked! */
351 }
352
353 static void decrease_watermark(struct userdata *u) {
354 size_t old_watermark;
355 pa_usec_t now;
356
357 pa_assert(u);
358 pa_assert(u->use_tsched);
359
360 now = pa_rtclock_now();
361
362 if (u->watermark_dec_not_before <= 0)
363 goto restart;
364
365 if (u->watermark_dec_not_before > now)
366 return;
367
368 old_watermark = u->tsched_watermark;
369
370 if (u->tsched_watermark < u->watermark_dec_step)
371 u->tsched_watermark = u->tsched_watermark / 2;
372 else
373 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
374
375 fix_tsched_watermark(u);
376
377 if (old_watermark != u->tsched_watermark)
378 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
379 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
380
381 /* We don't change the latency range*/
382
383 restart:
384 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
385 }
386
/* Split the effective latency into a sleep period (*sleep_usec) and a
 * processing reserve (*process_usec, derived from the watermark) for the
 * IO thread. Only meaningful in timer-based scheduling mode. */
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* If no client requested a specific latency, use the full hw buffer time */
    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    /* If the watermark exceeds the total latency, fall back to half of it */
    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
416
417 static int try_recover(struct userdata *u, const char *call, int err) {
418 pa_assert(u);
419 pa_assert(call);
420 pa_assert(err < 0);
421
422 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
423
424 pa_assert(err != -EAGAIN);
425
426 if (err == -EPIPE)
427 pa_log_debug("%s: Buffer underrun!", call);
428
429 if (err == -ESTRPIPE)
430 pa_log_debug("%s: System suspended!", call);
431
432 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
433 pa_log("%s: %s", call, pa_alsa_strerror(err));
434 return -1;
435 }
436
437 u->first = TRUE;
438 u->since_start = 0;
439 return 0;
440 }
441
/* Given the number of free bytes in the hardware buffer (n_bytes), return
 * how many bytes are still queued for playback. Also detects underruns and
 * adjusts the watermark: raise it on (near-)underrun, lower it when things
 * have been consistently comfortable and we were woken by our own timer
 * (on_timeout). */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_play;
    pa_bool_t underrun = FALSE;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = TRUE;

#if 0
        PA_DEBUG_TRAP;
#endif

        /* Don't complain right after a start or a rewind -- dropouts are
         * expected there. */
        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit(PA_LOG_INFO))
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = FALSE;

                /* We decrease the watermark only if have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        /* Buffer level dipped below the comfortable zone: restart the
         * decrease-verification period. */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}
499
/* Fill the hardware buffer via the ALSA mmap interface.
 *
 * polled:     we were woken up by POLLOUT rather than by our timer
 * on_timeout: we were woken up by our timer (allows lowering the watermark
 *             in check_left_to_play())
 * sleep_usec: on return, how long the caller may sleep before writing again
 *
 * Returns 1 if any data was written, 0 if none, negative on unrecoverable
 * error. */
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* POLLOUT woke us up although there is nothing to write --
             * that's a driver bug, complain (once). */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }


        /* Safety valve against looping forever in this fill loop. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        /* Leave the configured slack of the buffer unused. */
        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the mmap'ed hw buffer region into a fixed memblock so the
             * sink can render straight into it. */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if (!after_avail && (int) sframes == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the watermark would be reached. */
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
677
/* Fill the hardware buffer via snd_pcm_writei() (the non-mmap path).
 * Parameters and return value are the same as mmap_write(). Rendered data
 * is staged in u->memchunk between iterations because a single writei()
 * may accept fewer frames than were rendered. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* How much room is there in the hardware buffer? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* POLLOUT woke us up although there is nothing to write --
             * that's a driver bug, complain (once). */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Safety valve against looping forever in this fill loop. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        /* Leave the configured slack of the buffer unused. */
        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Refill the staging chunk if it has been drained. */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Consume what the device accepted from the staging chunk. */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the watermark would be reached. */
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
818
819 static void update_smoother(struct userdata *u) {
820 snd_pcm_sframes_t delay = 0;
821 int64_t position;
822 int err;
823 pa_usec_t now1 = 0, now2;
824 snd_pcm_status_t *status;
825
826 snd_pcm_status_alloca(&status);
827
828 pa_assert(u);
829 pa_assert(u->pcm_handle);
830
831 /* Let's update the time smoother */
832
833 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
834 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
835 return;
836 }
837
838 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
839 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
840 else {
841 snd_htimestamp_t htstamp = { 0, 0 };
842 snd_pcm_status_get_htstamp(status, &htstamp);
843 now1 = pa_timespec_load(&htstamp);
844 }
845
846 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
847 if (now1 <= 0)
848 now1 = pa_rtclock_now();
849
850 /* check if the time since the last update is bigger than the interval */
851 if (u->last_smoother_update > 0)
852 if (u->last_smoother_update + u->smoother_interval > now1)
853 return;
854
855 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
856
857 if (PA_UNLIKELY(position < 0))
858 position = 0;
859
860 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
861
862 pa_smoother_put(u->smoother, now1, now2);
863
864 u->last_smoother_update = now1;
865 /* exponentially increase the update interval up to the MAX limit */
866 u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
867 }
868
869 static pa_usec_t sink_get_latency(struct userdata *u) {
870 pa_usec_t r;
871 int64_t delay;
872 pa_usec_t now1, now2;
873
874 pa_assert(u);
875
876 now1 = pa_rtclock_now();
877 now2 = pa_smoother_get(u->smoother, now1);
878
879 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
880
881 r = delay >= 0 ? (pa_usec_t) delay : 0;
882
883 if (u->memchunk.memblock)
884 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
885
886 return r;
887 }
888
889 static int build_pollfd(struct userdata *u) {
890 pa_assert(u);
891 pa_assert(u->pcm_handle);
892
893 if (u->alsa_rtpoll_item)
894 pa_rtpoll_item_free(u->alsa_rtpoll_item);
895
896 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
897 return -1;
898
899 return 0;
900 }
901
902 /* Called from IO context */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Stop the smoother's clock while the device is closed. */
    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    /* Remove the PCM's poll descriptors from the rtpoll. */
    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    /* We reset max_rewind/max_request here to make sure that while we
     * are suspended the old max_request/max_rewind values set before
     * the suspend can influence the per-stream buffer of newly
     * created streams, without their requirements having any
     * influence on them. */
    pa_sink_set_max_rewind_within_thread(u->sink, 0);
    pa_sink_set_max_request_within_thread(u->sink, 0);

    pa_log_info("Device suspended...");

    return 0;
}
931
932 /* Called from IO context */
/* Recompute hwbuf_unused from the requested latency and push the resulting
 * avail_min and start-threshold settings down to ALSA. Also updates the
 * sink's max_request/max_rewind. Returns 0 on success, negative on error. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused. */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        /* hwbuf_unused changed, so the derived limits must be recomputed. */
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at last one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
    /* Rewinding is only reliable on real hardware devices. */
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
        pa_sink_set_max_rewind_within_thread(u->sink, 0);
    }

    return 0;
}
993
994 /* Called from IO Context on unsuspend or from main thread when creating sink */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            pa_bool_t in_thread)
{
    /* Convert the watermark, expressed in bytes of sample spec ss, into
     * bytes of the sink's own sample spec. */
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
                                                    &u->sink->sample_spec);

    /* Derive the watermark adjustment steps/thresholds for this sample spec. */
    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_sink_set_latency_range_within_thread(u->sink,
                                                u->min_latency_ref,
                                                pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work-around assert in pa_sink_set_latency_within_thead,
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->sink->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
}
1028
/* Called from IO context */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;
    char *device_name = NULL;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* For passthrough over S/PDIF or HDMI the device has to be reopened
     * with the IEC958 non-audio bit set (",AES0=6" device suffix). */
    if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
        /* Need to open device in NONAUDIO mode */
        int len = strlen(u->device_name) + 8; /* ",AES0=6" (7 chars) + '\0' */

        device_name = pa_xmalloc(len);
        pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
    }

    if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Ask the hardware for exactly the configuration we had before
     * suspending; the variables double as in/out parameters below. */
    ss = u->sink->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    /* If the device could not reproduce the old settings we must give up:
     * the sink's published parameters would no longer match reality. */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Restart timing estimation from scratch. */
    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;
    u->since_start = 0;

    /* reset the watermark to the value defined when sink was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, TRUE);

    pa_log_info("Resumed successfully...");

    pa_xfree(device_name);
    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    pa_xfree(device_name);

    return -PA_ERR_IO;
}
1121
/* Called from IO context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no PCM handle; report zero latency. */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED: {
                    int r;

                    /* Only an opened sink can transition to suspended. */
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING: {
                    int r;

                    /* First transition out of INIT: set up the poll fds. */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    /* Coming back from suspend: reopen and reconfigure. */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    /* Everything else is handled by the generic sink implementation. */
    return pa_sink_process_msg(o, code, data, offset, chunk);
}
1182
1183 /* Called from main context */
1184 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1185 pa_sink_state_t old_state;
1186 struct userdata *u;
1187
1188 pa_sink_assert_ref(s);
1189 pa_assert_se(u = s->userdata);
1190
1191 old_state = pa_sink_get_state(u->sink);
1192
1193 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1194 reserve_done(u);
1195 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1196 if (reserve_init(u, u->device_name) < 0)
1197 return -PA_ERR_BUSY;
1198
1199 return 0;
1200 }
1201
1202 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1203 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1204
1205 pa_assert(u);
1206 pa_assert(u->mixer_handle);
1207
1208 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1209 return 0;
1210
1211 if (!PA_SINK_IS_LINKED(u->sink->state))
1212 return 0;
1213
1214 if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
1215 pa_sink_set_mixer_dirty(u->sink, TRUE);
1216 return 0;
1217 }
1218
1219 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1220 pa_sink_get_volume(u->sink, TRUE);
1221 pa_sink_get_mute(u->sink, TRUE);
1222 }
1223
1224 return 0;
1225 }
1226
1227 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1228 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1229
1230 pa_assert(u);
1231 pa_assert(u->mixer_handle);
1232
1233 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1234 return 0;
1235
1236 if (u->sink->suspend_cause & PA_SUSPEND_SESSION) {
1237 pa_sink_set_mixer_dirty(u->sink, TRUE);
1238 return 0;
1239 }
1240
1241 if (mask & SND_CTL_EVENT_MASK_VALUE)
1242 pa_sink_update_volume_and_mute(u->sink);
1243
1244 return 0;
1245 }
1246
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Read the current hardware volume; on failure keep the cached state. */
    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

    if (u->mixer_path->has_dB) {
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
    }

    /* Unchanged — avoid spurious volume-change processing. */
    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_sink_set_soft_volume(s, NULL);
}
1279
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    /* With deferred volume the actual mixer write happens later in the
     * IO thread (sink_write_volume_cb); here we only compute values. */
    pa_bool_t deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    /* pa_alsa_path_set_volume() rounds to what the hardware can do and
     * returns the value actually set back in r. */
    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1337
/* Called from the IO thread when deferred volume is enabled: write the
 * volume chosen earlier by sink_set_volume_cb() out to the hardware. */
static void sink_write_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        /* Compare what was actually written against what was requested;
         * small rounding deviations are tolerated silently. */
        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            /* Union: the two buffer sets are never used at the same time. */
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug(" in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}
1379
1380 static void sink_get_mute_cb(pa_sink *s) {
1381 struct userdata *u = s->userdata;
1382 pa_bool_t b;
1383
1384 pa_assert(u);
1385 pa_assert(u->mixer_path);
1386 pa_assert(u->mixer_handle);
1387
1388 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1389 return;
1390
1391 s->muted = b;
1392 }
1393
1394 static void sink_set_mute_cb(pa_sink *s) {
1395 struct userdata *u = s->userdata;
1396
1397 pa_assert(u);
1398 pa_assert(u->mixer_path);
1399 pa_assert(u->mixer_handle);
1400
1401 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1402 }
1403
/* Register or clear the sink's volume/mute callbacks according to what
 * the currently selected mixer path supports. */
static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        pa_sink_set_write_volume_callback(u->sink, NULL);
        pa_sink_set_get_volume_callback(u->sink, NULL);
        pa_sink_set_set_volume_callback(u->sink, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
        pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);

        /* Deferred volume (mixer writes from the IO thread) requires a
         * dB-capable path. */
        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
            pa_log_info("Successfully enabled deferred volume.");
        } else
            pa_sink_set_write_volume_callback(u->sink, NULL);

        if (u->mixer_path->has_dB) {
            pa_sink_enable_decibel_volume(u->sink, TRUE);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Base volume is the attenuation that maps 0dB to PA_VOLUME_NORM. */
            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
        } else {
            pa_sink_enable_decibel_volume(u->sink, FALSE);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            /* Without dB data we can only expose the raw integer steps. */
            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_sink_set_get_mute_callback(u->sink, NULL);
        pa_sink_set_set_mute_callback(u->sink, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
        pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}
1452
static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    /* Each port carries the ALSA mixer path (and optional setting) that
     * implements it. */
    data = PA_DEVICE_PORT_DATA(p);

    /* pa_assert_se: the assignment must happen even in non-debug builds. */
    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    /* Re-register volume/mute callbacks for the new path's capabilities. */
    mixer_volume_init(u);

    if (data->setting)
        pa_alsa_setting_select(data->setting, u->mixer_handle);

    /* Push the sink's current mute and volume out to the new path. */
    if (s->set_mute)
        s->set_mute(s);
    if (s->flags & PA_SINK_DEFERRED_VOLUME) {
        if (s->write_volume)
            s->write_volume(s);
    } else {
        if (s->set_volume)
            s->set_volume(s);
    }

    return 0;
}
1483
1484 static void sink_update_requested_latency_cb(pa_sink *s) {
1485 struct userdata *u = s->userdata;
1486 size_t before;
1487 pa_assert(u);
1488 pa_assert(u->use_tsched); /* only when timer scheduling is used
1489 * we can dynamically adjust the
1490 * latency */
1491
1492 if (!u->pcm_handle)
1493 return;
1494
1495 before = u->hwbuf_unused;
1496 update_sw_params(u);
1497
1498 /* Let's check whether we now use only a smaller part of the
1499 buffer then before. If so, we need to make sure that subsequent
1500 rewinds are relative to the new maximum fill level and not to the
1501 current fill level. Thus, let's do a full rewind once, to clear
1502 things up. */
1503
1504 if (u->hwbuf_unused > before) {
1505 pa_log_debug("Requesting rewind due to latency change.");
1506 pa_sink_request_rewind(s, (size_t) -1);
1507 }
1508 }
1509
1510 static pa_idxset* sink_get_formats(pa_sink *s) {
1511 struct userdata *u = s->userdata;
1512 pa_idxset *ret = pa_idxset_new(NULL, NULL);
1513 pa_format_info *f;
1514 uint32_t idx;
1515
1516 pa_assert(u);
1517
1518 PA_IDXSET_FOREACH(f, u->formats, idx) {
1519 pa_idxset_put(ret, pa_format_info_copy(f), NULL);
1520 }
1521
1522 return ret;
1523 }
1524
/* Accept or reject a new list of formats for this sink; on success the
 * list replaces u->formats. Returns FALSE if any format is unsupported. */
static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
    struct userdata *u = s->userdata;
    pa_format_info *f, *g;
    uint32_t idx, n;

    pa_assert(u);

    /* FIXME: also validate sample rates against what the device supports */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
            /* EAC3 cannot be sent over S/PDIF */
            return FALSE;
    }

    /* Replace the previously accepted list. */
    pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
    u->formats = pa_idxset_new(NULL, NULL);

    /* Note: the logic below won't apply if we're using software encoding.
     * This is fine for now since we don't support that via the passthrough
     * framework, but this must be changed if we do. */

    /* Count how many sample rates we support (u->rates is zero-terminated) */
    for (idx = 0, n = 0; u->rates[idx]; idx++)
        n++;

    /* First insert non-PCM formats since we prefer those. */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (!pa_format_info_is_pcm(f)) {
            g = pa_format_info_copy(f);
            pa_format_info_set_prop_int_array(g, PA_PROP_FORMAT_RATE, (int *) u->rates, n);
            pa_idxset_put(u->formats, g, NULL);
        }
    }

    /* Now add any PCM formats */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (pa_format_info_is_pcm(f)) {
            /* We don't set rates here since we'll just tack on a resampler for
             * unsupported rates */
            pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
        }
    }

    return TRUE;
}
1570
1571 static pa_bool_t sink_update_rate_cb(pa_sink *s, uint32_t rate)
1572 {
1573 struct userdata *u = s->userdata;
1574 int i;
1575 pa_bool_t supported = FALSE;
1576
1577 pa_assert(u);
1578
1579 for (i = 0; u->rates[i]; i++) {
1580 if (u->rates[i] == rate) {
1581 supported = TRUE;
1582 break;
1583 }
1584 }
1585
1586 if (!supported) {
1587 pa_log_info("Sink does not support sample rate of %d Hz", rate);
1588 return FALSE;
1589 }
1590
1591 if (!PA_SINK_IS_OPENED(s->state)) {
1592 pa_log_info("Updating rate for device %s, new rate is %d",u->device_name, rate);
1593 u->sink->sample_spec.rate = rate;
1594 return TRUE;
1595 }
1596
1597 return FALSE;
1598 }
1599
/* Rewind the ALSA playback pointer to honor a sink rewind request.
 * Returns 0 on success (including "nothing rewound"), -1 on fatal error. */
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    /* How many frames of the hardware buffer are currently unfilled? */
    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
        return -1;
    }

    unused_nbytes = (size_t) unused * u->frame_size;

    /* make sure rewind doesn't go too far, can cause issues with DMAs */
    unused_nbytes += u->rewind_safeguard;

    /* Only the written-but-unplayed region may be rewound. */
    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
            if (try_recover(u, "process_rewind", out_frames) < 0)
                return -1;
            /* Recovered, but nothing was rewound. */
            out_frames = 0;
        }

        pa_log_debug("after: %lu", (unsigned long) out_frames);

        /* The device may have rewound fewer frames than requested. */
        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= rewind_nbytes;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    /* Complete the rewind request reporting zero bytes rewound. */
    pa_sink_process_rewind(u->sink, 0);
    return 0;
}
1662
/* Main loop of the sink's IO thread: renders audio into the ALSA device,
 * schedules wakeups (timer-based when tsched is on), and reacts to poll
 * events and error conditions until asked to shut down. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            /* Handle pending rewind requests before writing new data. */
            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First successful write after (re)start: kick off the
                 * device and resume timing estimation. */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

                    u->first = FALSE;
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    if (pa_log_ratelimit(PA_LOG_DEBUG))
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
#ifdef DEBUG_TIMING
                pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC);
#endif

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

#ifdef DEBUG_TIMING
                pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC);
#endif

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }

            u->after_rewind = FALSE;

        }

        /* Deferred volume changes may also need a timed wakeup. */
        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_sink_volume_change_apply(u->sink, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0)
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
            pa_sink_volume_change_apply(u->sink, NULL);

        /* ret == 0 means the rtpoll was asked to quit. */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Any event other than "writable" indicates an error
             * condition; try to recover and restart playback. */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1820
1821 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1822 const char *n;
1823 char *t;
1824
1825 pa_assert(data);
1826 pa_assert(ma);
1827 pa_assert(device_name);
1828
1829 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1830 pa_sink_new_data_set_name(data, n);
1831 data->namereg_fail = TRUE;
1832 return;
1833 }
1834
1835 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1836 data->namereg_fail = TRUE;
1837 else {
1838 n = device_id ? device_id : device_name;
1839 data->namereg_fail = FALSE;
1840 }
1841
1842 if (mapping)
1843 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1844 else
1845 t = pa_sprintf_malloc("alsa_output.%s", n);
1846
1847 pa_sink_new_data_set_name(data, t);
1848 pa_xfree(t);
1849 }
1850
/* Locate a mixer device and path (set) for the sink. On failure all
 * partially initialized mixer state is torn down again. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
    snd_hctl_t *hctl;

    /* Without a mapping or an explicit mixer element there is nothing to
     * look for. */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* An explicit control element was requested: build a synthetic
         * path around just that element and probe it. */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else if (!(u->mixer_path_set = mapping->output_path_set))
        goto fail;

    return;

fail:

    if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1889
1890
/* Select and activate a mixer path for the sink and, when the path can do
 * hardware volume or mute, register the matching event callbacks.
 * Returns 0 on success (including "no mixer"), -1 on setup failure. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_bool_t need_mixer_callback = FALSE;

    pa_assert(u);

    /* No mixer device found earlier: software volume/mute will be used. */
    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        /* No active port: fall back to the first probed path, if any. */
        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    /* Hook up volume/mute callbacks matching the selected path. */
    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;
        void *state;

        PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = TRUE;
        }
    }
    else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            /* Deferred volume: mixer events are handled in the IO thread. */
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            /* Otherwise mixer events are dispatched from the main loop. */
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}
1972
1973 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1974
1975 struct userdata *u = NULL;
1976 const char *dev_id = NULL;
1977 pa_sample_spec ss;
1978 uint32_t alternate_sample_rate;
1979 pa_channel_map map;
1980 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
1981 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1982 size_t frame_size;
1983 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, set_formats = FALSE, fixed_latency_range = FALSE;
1984 pa_sink_new_data data;
1985 pa_alsa_profile_set *profile_set = NULL;
1986
1987 pa_assert(m);
1988 pa_assert(ma);
1989
1990 ss = m->core->default_sample_spec;
1991 map = m->core->default_channel_map;
1992 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1993 pa_log("Failed to parse sample specification and channel map");
1994 goto fail;
1995 }
1996
1997 alternate_sample_rate = m->core->alternate_sample_rate;
1998 if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
1999 pa_log("Failed to parse alternate sample rate");
2000 goto fail;
2001 }
2002
2003 frame_size = pa_frame_size(&ss);
2004
2005 nfrags = m->core->default_n_fragments;
2006 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
2007 if (frag_size <= 0)
2008 frag_size = (uint32_t) frame_size;
2009 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
2010 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
2011
2012 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
2013 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
2014 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
2015 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
2016 pa_log("Failed to parse buffer metrics");
2017 goto fail;
2018 }
2019
2020 buffer_size = nfrags * frag_size;
2021
2022 period_frames = frag_size/frame_size;
2023 buffer_frames = buffer_size/frame_size;
2024 tsched_frames = tsched_size/frame_size;
2025
2026 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
2027 pa_log("Failed to parse mmap argument.");
2028 goto fail;
2029 }
2030
2031 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
2032 pa_log("Failed to parse tsched argument.");
2033 goto fail;
2034 }
2035
2036 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
2037 pa_log("Failed to parse ignore_dB argument.");
2038 goto fail;
2039 }
2040
2041 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
2042 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
2043 pa_log("Failed to parse rewind_safeguard argument");
2044 goto fail;
2045 }
2046
2047 deferred_volume = m->core->deferred_volume;
2048 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
2049 pa_log("Failed to parse deferred_volume argument.");
2050 goto fail;
2051 }
2052
2053 if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
2054 pa_log("Failed to parse fixed_latency_range argument.");
2055 goto fail;
2056 }
2057
2058 use_tsched = pa_alsa_may_tsched(use_tsched);
2059
2060 u = pa_xnew0(struct userdata, 1);
2061 u->core = m->core;
2062 u->module = m;
2063 u->use_mmap = use_mmap;
2064 u->use_tsched = use_tsched;
2065 u->deferred_volume = deferred_volume;
2066 u->fixed_latency_range = fixed_latency_range;
2067 u->first = TRUE;
2068 u->rewind_safeguard = rewind_safeguard;
2069 u->rtpoll = pa_rtpoll_new();
2070 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
2071
2072 u->smoother = pa_smoother_new(
2073 SMOOTHER_ADJUST_USEC,
2074 SMOOTHER_WINDOW_USEC,
2075 TRUE,
2076 TRUE,
2077 5,
2078 pa_rtclock_now(),
2079 TRUE);
2080 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
2081
2082 dev_id = pa_modargs_get_value(
2083 ma, "device_id",
2084 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
2085
2086 u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
2087
2088 if (reserve_init(u, dev_id) < 0)
2089 goto fail;
2090
2091 if (reserve_monitor_init(u, dev_id) < 0)
2092 goto fail;
2093
2094 b = use_mmap;
2095 d = use_tsched;
2096
2097 if (mapping) {
2098
2099 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2100 pa_log("device_id= not set");
2101 goto fail;
2102 }
2103
2104 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
2105 dev_id,
2106 &u->device_name,
2107 &ss, &map,
2108 SND_PCM_STREAM_PLAYBACK,
2109 &period_frames, &buffer_frames, tsched_frames,
2110 &b, &d, mapping)))
2111 goto fail;
2112
2113 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2114
2115 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
2116 goto fail;
2117
2118 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
2119 dev_id,
2120 &u->device_name,
2121 &ss, &map,
2122 SND_PCM_STREAM_PLAYBACK,
2123 &period_frames, &buffer_frames, tsched_frames,
2124 &b, &d, profile_set, &mapping)))
2125 goto fail;
2126
2127 } else {
2128
2129 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
2130 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
2131 &u->device_name,
2132 &ss, &map,
2133 SND_PCM_STREAM_PLAYBACK,
2134 &period_frames, &buffer_frames, tsched_frames,
2135 &b, &d, FALSE)))
2136 goto fail;
2137 }
2138
2139 pa_assert(u->device_name);
2140 pa_log_info("Successfully opened device %s.", u->device_name);
2141
2142 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2143 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
2144 goto fail;
2145 }
2146
2147 if (mapping)
2148 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
2149
2150 if (use_mmap && !b) {
2151 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2152 u->use_mmap = use_mmap = FALSE;
2153 }
2154
2155 if (use_tsched && (!b || !d)) {
2156 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2157 u->use_tsched = use_tsched = FALSE;
2158 }
2159
2160 if (u->use_mmap)
2161 pa_log_info("Successfully enabled mmap() mode.");
2162
2163 if (u->use_tsched) {
2164 pa_log_info("Successfully enabled timer-based scheduling mode.");
2165
2166 if (u->fixed_latency_range)
2167 pa_log_info("Disabling latency range changes on underrun");
2168 }
2169
2170 if (is_iec958(u) || is_hdmi(u))
2171 set_formats = TRUE;
2172
2173 u->rates = pa_alsa_get_supported_rates(u->pcm_handle);
2174 if (!u->rates) {
2175 pa_log_error("Failed to find any supported sample rates.");
2176 goto fail;
2177 }
2178
2179 /* ALSA might tweak the sample spec, so recalculate the frame size */
2180 frame_size = pa_frame_size(&ss);
2181
2182 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2183
2184 pa_sink_new_data_init(&data);
2185 data.driver = driver;
2186 data.module = m;
2187 data.card = card;
2188 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
2189
2190 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2191 * variable instead of using &data.namereg_fail directly, because
2192 * data.namereg_fail is a bitfield and taking the address of a bitfield
2193 * variable is impossible. */
2194 namereg_fail = data.namereg_fail;
2195 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2196 pa_log("Failed to parse namereg_fail argument.");
2197 pa_sink_new_data_done(&data);
2198 goto fail;
2199 }
2200 data.namereg_fail = namereg_fail;
2201
2202 pa_sink_new_data_set_sample_spec(&data, &ss);
2203 pa_sink_new_data_set_channel_map(&data, &map);
2204 pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
2205
2206 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2207 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2208 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2209 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2210 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2211
2212 if (mapping) {
2213 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2214 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2215 }
2216
2217 pa_alsa_init_description(data.proplist);
2218
2219 if (u->control_device)
2220 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2221
2222 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2223 pa_log("Invalid properties");
2224 pa_sink_new_data_done(&data);
2225 goto fail;
2226 }
2227
2228 if (u->mixer_path_set)
2229 pa_alsa_add_ports(&data, u->mixer_path_set, card);
2230
2231 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
2232 (set_formats ? PA_SINK_SET_FORMATS : 0));
2233 pa_sink_new_data_done(&data);
2234
2235 if (!u->sink) {
2236 pa_log("Failed to create sink object");
2237 goto fail;
2238 }
2239
2240 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2241 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2242 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2243 goto fail;
2244 }
2245
2246 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2247 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2248 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2249 goto fail;
2250 }
2251
2252 u->sink->parent.process_msg = sink_process_msg;
2253 if (u->use_tsched)
2254 u->sink->update_requested_latency = sink_update_requested_latency_cb;
2255 u->sink->set_state = sink_set_state_cb;
2256 u->sink->set_port = sink_set_port_cb;
2257 if (u->sink->alternate_sample_rate)
2258 u->sink->update_rate = sink_update_rate_cb;
2259 u->sink->userdata = u;
2260
2261 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2262 pa_sink_set_rtpoll(u->sink, u->rtpoll);
2263
2264 u->frame_size = frame_size;
2265 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2266 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2267 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2268
2269 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2270 (double) u->hwbuf_size / (double) u->fragment_size,
2271 (long unsigned) u->fragment_size,
2272 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2273 (long unsigned) u->hwbuf_size,
2274 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2275
2276 pa_sink_set_max_request(u->sink, u->hwbuf_size);
2277 if (pa_alsa_pcm_is_hw(u->pcm_handle))
2278 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2279 else {
2280 pa_log_info("Disabling rewind for device %s", u->device_name);
2281 pa_sink_set_max_rewind(u->sink, 0);
2282 }
2283
2284 if (u->use_tsched) {
2285 u->tsched_watermark_ref = tsched_watermark;
2286 reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
2287 } else
2288 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
2289
2290 reserve_update(u);
2291
2292 if (update_sw_params(u) < 0)
2293 goto fail;
2294
2295 if (setup_mixer(u, ignore_dB) < 0)
2296 goto fail;
2297
2298 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2299
2300 if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
2301 pa_log("Failed to create thread.");
2302 goto fail;
2303 }
2304
2305 /* Get initial mixer settings */
2306 if (data.volume_is_set) {
2307 if (u->sink->set_volume)
2308 u->sink->set_volume(u->sink);
2309 } else {
2310 if (u->sink->get_volume)
2311 u->sink->get_volume(u->sink);
2312 }
2313
2314 if (data.muted_is_set) {
2315 if (u->sink->set_mute)
2316 u->sink->set_mute(u->sink);
2317 } else {
2318 if (u->sink->get_mute)
2319 u->sink->get_mute(u->sink);
2320 }
2321
2322 if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
2323 u->sink->write_volume(u->sink);
2324
2325 if (set_formats) {
2326 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2327 pa_format_info *format;
2328
2329 /* To start with, we only support PCM formats. Other formats may be added
2330 * with pa_sink_set_formats().*/
2331 format = pa_format_info_new();
2332 format->encoding = PA_ENCODING_PCM;
2333 u->formats = pa_idxset_new(NULL, NULL);
2334 pa_idxset_put(u->formats, format, NULL);
2335
2336 u->sink->get_formats = sink_get_formats;
2337 u->sink->set_formats = sink_set_formats;
2338 }
2339
2340 pa_sink_put(u->sink);
2341
2342 if (profile_set)
2343 pa_alsa_profile_set_free(profile_set);
2344
2345 return u->sink;
2346
2347 fail:
2348
2349 if (u)
2350 userdata_free(u);
2351
2352 if (profile_set)
2353 pa_alsa_profile_set_free(profile_set);
2354
2355 return NULL;
2356 }
2357
2358 static void userdata_free(struct userdata *u) {
2359 pa_assert(u);
2360
2361 if (u->sink)
2362 pa_sink_unlink(u->sink);
2363
2364 if (u->thread) {
2365 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2366 pa_thread_free(u->thread);
2367 }
2368
2369 pa_thread_mq_done(&u->thread_mq);
2370
2371 if (u->sink)
2372 pa_sink_unref(u->sink);
2373
2374 if (u->memchunk.memblock)
2375 pa_memblock_unref(u->memchunk.memblock);
2376
2377 if (u->mixer_pd)
2378 pa_alsa_mixer_pdata_free(u->mixer_pd);
2379
2380 if (u->alsa_rtpoll_item)
2381 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2382
2383 if (u->rtpoll)
2384 pa_rtpoll_free(u->rtpoll);
2385
2386 if (u->pcm_handle) {
2387 snd_pcm_drop(u->pcm_handle);
2388 snd_pcm_close(u->pcm_handle);
2389 }
2390
2391 if (u->mixer_fdl)
2392 pa_alsa_fdlist_free(u->mixer_fdl);
2393
2394 if (u->mixer_path && !u->mixer_path_set)
2395 pa_alsa_path_free(u->mixer_path);
2396
2397 if (u->mixer_handle)
2398 snd_mixer_close(u->mixer_handle);
2399
2400 if (u->smoother)
2401 pa_smoother_free(u->smoother);
2402
2403 if (u->formats)
2404 pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2405
2406 if (u->rates)
2407 pa_xfree(u->rates);
2408
2409 reserve_done(u);
2410 monitor_done(u);
2411
2412 pa_xfree(u->device_name);
2413 pa_xfree(u->control_device);
2414 pa_xfree(u->paths_dir);
2415 pa_xfree(u);
2416 }
2417
2418 void pa_alsa_sink_free(pa_sink *s) {
2419 struct userdata *u;
2420
2421 pa_sink_assert_ref(s);
2422 pa_assert_se(u = s->userdata);
2423
2424 userdata_free(u);
2425 }