pulseaudio: src/modules/alsa/alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15     Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/rtclock.h>
36 #include <pulse/timeval.h>
37 #include <pulse/volume.h>
38 #include <pulse/xmalloc.h>
39 #include <pulse/internal.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/i18n.h>
43 #include <pulsecore/module.h>
44 #include <pulsecore/memchunk.h>
45 #include <pulsecore/sink.h>
46 #include <pulsecore/modargs.h>
47 #include <pulsecore/core-rtclock.h>
48 #include <pulsecore/core-util.h>
49 #include <pulsecore/sample-util.h>
50 #include <pulsecore/log.h>
51 #include <pulsecore/macro.h>
52 #include <pulsecore/thread.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
68
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)   /* 20s -- How long after a dropout to recheck whether things are good now */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)  /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
74
75 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
76 * will increase the watermark only if we hit a real underrun. */
77
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
79 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)               /* 4ms -- Wakeup at least this long before the buffer runs empty */
80
81 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC)                 /* 10s -- smoother window size */
82 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
83
84 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
85 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
86
87 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)  /* Don't require volume adjustments to be perfectly correct; don't necessarily extend granularity in software unless the differences get greater than this level */
88
89 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
90 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms; depending on channels/rate/sample format we may rewind more than the 256 bytes above */
91
92 struct userdata {
93 pa_core *core;
94 pa_module *module;
95 pa_sink *sink;
96
97 pa_thread *thread;
98 pa_thread_mq thread_mq;
99 pa_rtpoll *rtpoll;
100
101 snd_pcm_t *pcm_handle;
102
103 char *paths_dir;
104 pa_alsa_fdlist *mixer_fdl;
105 pa_alsa_mixer_pdata *mixer_pd;
106 snd_mixer_t *mixer_handle;
107 pa_alsa_path_set *mixer_path_set;
108 pa_alsa_path *mixer_path;
109
110 pa_cvolume hardware_volume;
111
112 uint32_t old_rate;
113
114 size_t
115 frame_size,
116 fragment_size,
117 hwbuf_size,
118 tsched_watermark,
119 tsched_watermark_ref,
120 hwbuf_unused,
121 min_sleep,
122 min_wakeup,
123 watermark_inc_step,
124 watermark_dec_step,
125 watermark_inc_threshold,
126 watermark_dec_threshold,
127 rewind_safeguard;
128
129 pa_usec_t watermark_dec_not_before;
130 pa_usec_t min_latency_ref;
131
132 pa_memchunk memchunk;
133
134 char *device_name; /* name of the PCM device */
135 char *control_device; /* name of the control device */
136
137 pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1;
138
139 pa_bool_t first, after_rewind;
140
141 pa_rtpoll_item *alsa_rtpoll_item;
142
143 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
144
145 pa_smoother *smoother;
146 uint64_t write_count;
147 uint64_t since_start;
148 pa_usec_t smoother_interval;
149 pa_usec_t last_smoother_update;
150
151 pa_idxset *formats;
152
153 pa_reserve_wrapper *reserve;
154 pa_hook_slot *reserve_slot;
155 pa_reserve_monitor_wrapper *monitor;
156 pa_hook_slot *monitor_slot;
157 };
158
159 static void userdata_free(struct userdata *u);
160
161 /* FIXME: Is there a better way to do this than device names? */
162 static pa_bool_t is_iec958(struct userdata *u) {
163 return (strncmp("iec958", u->device_name, 6) == 0);
164 }
165
166 static pa_bool_t is_hdmi(struct userdata *u) {
167 return (strncmp("hdmi", u->device_name, 4) == 0);
168 }
169
170 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
171 pa_assert(r);
172 pa_assert(u);
173
174 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
175 return PA_HOOK_CANCEL;
176
177 return PA_HOOK_OK;
178 }
179
180 static void reserve_done(struct userdata *u) {
181 pa_assert(u);
182
183 if (u->reserve_slot) {
184 pa_hook_slot_free(u->reserve_slot);
185 u->reserve_slot = NULL;
186 }
187
188 if (u->reserve) {
189 pa_reserve_wrapper_unref(u->reserve);
190 u->reserve = NULL;
191 }
192 }
193
194 static void reserve_update(struct userdata *u) {
195 const char *description;
196 pa_assert(u);
197
198 if (!u->sink || !u->reserve)
199 return;
200
201 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
202 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
203 }
204
205 static int reserve_init(struct userdata *u, const char *dname) {
206 char *rname;
207
208 pa_assert(u);
209 pa_assert(dname);
210
211 if (u->reserve)
212 return 0;
213
214 if (pa_in_system_mode())
215 return 0;
216
217 if (!(rname = pa_alsa_get_reserve_name(dname)))
218 return 0;
219
220 /* We are resuming, try to lock the device */
221 u->reserve = pa_reserve_wrapper_get(u->core, rname);
222 pa_xfree(rname);
223
224 if (!(u->reserve))
225 return -1;
226
227 reserve_update(u);
228
229 pa_assert(!u->reserve_slot);
230 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
231
232 return 0;
233 }
234
235 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
236 pa_bool_t b;
237
238 pa_assert(w);
239 pa_assert(u);
240
241 b = PA_PTR_TO_UINT(busy) && !u->reserve;
242
243 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
244 return PA_HOOK_OK;
245 }
246
247 static void monitor_done(struct userdata *u) {
248 pa_assert(u);
249
250 if (u->monitor_slot) {
251 pa_hook_slot_free(u->monitor_slot);
252 u->monitor_slot = NULL;
253 }
254
255 if (u->monitor) {
256 pa_reserve_monitor_wrapper_unref(u->monitor);
257 u->monitor = NULL;
258 }
259 }
260
261 static int reserve_monitor_init(struct userdata *u, const char *dname) {
262 char *rname;
263
264 pa_assert(u);
265 pa_assert(dname);
266
267 if (pa_in_system_mode())
268 return 0;
269
270 if (!(rname = pa_alsa_get_reserve_name(dname)))
271 return 0;
272
273 /* We are resuming, try to lock the device */
274 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
275 pa_xfree(rname);
276
277 if (!(u->monitor))
278 return -1;
279
280 pa_assert(!u->monitor_slot);
281 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
282
283 return 0;
284 }
285
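/* Derive the minimum sleep and wakeup margins in bytes: at least one
 * frame each, but never more than half of the usable hardware buffer. */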
286 static void fix_min_sleep_wakeup(struct userdata *u) {
287 size_t max_use, max_use_2;
288
289 pa_assert(u);
290 pa_assert(u->use_tsched);
291
292 max_use = u->hwbuf_size - u->hwbuf_unused;
293 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
294
295 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
296 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
297
298 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
299 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
300 }
301
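/* Clamp the watermark so it stays between min_wakeup and the usable
 * buffer size minus min_sleep. */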
302 static void fix_tsched_watermark(struct userdata *u) {
303 size_t max_use;
304 pa_assert(u);
305 pa_assert(u->use_tsched);
306
307 max_use = u->hwbuf_size - u->hwbuf_unused;
308
309 if (u->tsched_watermark > max_use - u->min_sleep)
310 u->tsched_watermark = max_use - u->min_sleep;
311
312 if (u->tsched_watermark < u->min_wakeup)
313 u->tsched_watermark = u->min_wakeup;
314 }
315
316 static void increase_watermark(struct userdata *u) {
317 size_t old_watermark;
318 pa_usec_t old_min_latency, new_min_latency;
319
320 pa_assert(u);
321 pa_assert(u->use_tsched);
322
323 /* First, just try to increase the watermark */
324 old_watermark = u->tsched_watermark;
325 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
326 fix_tsched_watermark(u);
327
328 if (old_watermark != u->tsched_watermark) {
329 pa_log_info("Increasing wakeup watermark to %0.2f ms",
330 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
331 return;
332 }
333
334 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
335 old_min_latency = u->sink->thread_info.min_latency;
336 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
337 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
338
339 if (old_min_latency != new_min_latency) {
340 pa_log_info("Increasing minimal latency to %0.2f ms",
341 (double) new_min_latency / PA_USEC_PER_MSEC);
342
343 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
344 }
345
346     /* When we reach this we're officially fucked! */
347 }
348
349 static void decrease_watermark(struct userdata *u) {
350 size_t old_watermark;
351 pa_usec_t now;
352
353 pa_assert(u);
354 pa_assert(u->use_tsched);
355
356 now = pa_rtclock_now();
357
358 if (u->watermark_dec_not_before <= 0)
359 goto restart;
360
361 if (u->watermark_dec_not_before > now)
362 return;
363
364 old_watermark = u->tsched_watermark;
365
366 if (u->tsched_watermark < u->watermark_dec_step)
367 u->tsched_watermark = u->tsched_watermark / 2;
368 else
369 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
370
371 fix_tsched_watermark(u);
372
373 if (old_watermark != u->tsched_watermark)
374 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
375 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
376
377     /* We don't change the latency range */
378
379 restart:
380 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
381 }
382
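/* Split the target latency into a sleep period and processing headroom:
 * we sleep until only the watermark's worth of audio (at most half the
 * latency) is left in the buffer, then refill. */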
383 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
384 pa_usec_t usec, wm;
385
386 pa_assert(sleep_usec);
387 pa_assert(process_usec);
388
389 pa_assert(u);
390 pa_assert(u->use_tsched);
391
392 usec = pa_sink_get_requested_latency_within_thread(u->sink);
393
394 if (usec == (pa_usec_t) -1)
395 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
396
397 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
398
399 if (wm > usec)
400 wm = usec/2;
401
402 *sleep_usec = usec - wm;
403 *process_usec = wm;
404
405 #ifdef DEBUG_TIMING
406 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
407 (unsigned long) (usec / PA_USEC_PER_MSEC),
408 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
409 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
410 #endif
411 }
412
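/* Log the ALSA error and try snd_pcm_recover(); -EPIPE means a buffer
 * underrun and -ESTRPIPE means the system was suspended. On success the
 * stream is restarted from scratch. */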
413 static int try_recover(struct userdata *u, const char *call, int err) {
414 pa_assert(u);
415 pa_assert(call);
416 pa_assert(err < 0);
417
418 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
419
420 pa_assert(err != -EAGAIN);
421
422 if (err == -EPIPE)
423 pa_log_debug("%s: Buffer underrun!", call);
424
425 if (err == -ESTRPIPE)
426 pa_log_debug("%s: System suspended!", call);
427
428 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
429 pa_log("%s: %s", call, pa_alsa_strerror(err));
430 return -1;
431 }
432
433 u->first = TRUE;
434 u->since_start = 0;
435 return 0;
436 }
437
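/* Convert the writable space reported by ALSA into the number of bytes
 * still queued for playback, flag underruns, and adapt the watermark
 * accordingly. */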
438 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
439 size_t left_to_play;
440 pa_bool_t underrun = FALSE;
441
442     /* We use <= instead of < for this check because an underrun
443      * only happens after the last sample was processed, not when it
444      * is merely removed from the buffer. This is particularly important
445      * when block transfer is used. */
446
447 if (n_bytes <= u->hwbuf_size)
448 left_to_play = u->hwbuf_size - n_bytes;
449 else {
450
451 /* We got a dropout. What a mess! */
452 left_to_play = 0;
453 underrun = TRUE;
454
455 #ifdef DEBUG_TIMING
456 PA_DEBUG_TRAP;
457 #endif
458
459 if (!u->first && !u->after_rewind)
460 if (pa_log_ratelimit(PA_LOG_INFO))
461 pa_log_info("Underrun!");
462 }
463
464 #ifdef DEBUG_TIMING
465 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
466 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
467 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
468 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
469 #endif
470
471 if (u->use_tsched) {
472 pa_bool_t reset_not_before = TRUE;
473
474 if (!u->first && !u->after_rewind) {
475 if (underrun || left_to_play < u->watermark_inc_threshold)
476 increase_watermark(u);
477 else if (left_to_play > u->watermark_dec_threshold) {
478 reset_not_before = FALSE;
479
480                 /* We decrease the watermark only if we have actually
481 * been woken up by a timeout. If something else woke
482 * us up it's too easy to fulfill the deadlines... */
483
484 if (on_timeout)
485 decrease_watermark(u);
486 }
487 }
488
489 if (reset_not_before)
490 u->watermark_dec_not_before = 0;
491 }
492
493 return left_to_play;
494 }
495
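/* mmap-based write path: query how much space is available and render
 * audio directly into the device buffer between mmap_begin/mmap_commit,
 * then work out how long we may sleep before the next refill. */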
496 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
497 pa_bool_t work_done = FALSE;
498 pa_usec_t max_sleep_usec = 0, process_usec = 0;
499 size_t left_to_play;
500 unsigned j = 0;
501
502 pa_assert(u);
503 pa_sink_assert_ref(u->sink);
504
505 if (u->use_tsched)
506 hw_sleep_time(u, &max_sleep_usec, &process_usec);
507
508 for (;;) {
509 snd_pcm_sframes_t n;
510 size_t n_bytes;
511 int r;
512 pa_bool_t after_avail = TRUE;
513
514 /* First we determine how many samples are missing to fill the
515 * buffer up to 100% */
516
517 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
518
519 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
520 continue;
521
522 return r;
523 }
524
525 n_bytes = (size_t) n * u->frame_size;
526
527 #ifdef DEBUG_TIMING
528 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
529 #endif
530
531 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
532 on_timeout = FALSE;
533
534 if (u->use_tsched)
535
536 /* We won't fill up the playback buffer before at least
537 * half the sleep time is over because otherwise we might
538          * ask for more data from the clients than they expect. We
539 * need to guarantee that clients only have to keep around
540 * a single hw buffer length. */
541
542 if (!polled &&
543 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
544 #ifdef DEBUG_TIMING
545 pa_log_debug("Not filling up, because too early.");
546 #endif
547 break;
548 }
549
550 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
551
552 if (polled)
553 PA_ONCE_BEGIN {
554 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
555 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
556 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
557 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
558 pa_strnull(dn));
559 pa_xfree(dn);
560 } PA_ONCE_END;
561
562 #ifdef DEBUG_TIMING
563 pa_log_debug("Not filling up, because not necessary.");
564 #endif
565 break;
566 }
567
568
569 if (++j > 10) {
570 #ifdef DEBUG_TIMING
571 pa_log_debug("Not filling up, because already too many iterations.");
572 #endif
573
574 break;
575 }
576
577 n_bytes -= u->hwbuf_unused;
578 polled = FALSE;
579
580 #ifdef DEBUG_TIMING
581 pa_log_debug("Filling up");
582 #endif
583
584 for (;;) {
585 pa_memchunk chunk;
586 void *p;
587 int err;
588 const snd_pcm_channel_area_t *areas;
589 snd_pcm_uframes_t offset, frames;
590 snd_pcm_sframes_t sframes;
591
592 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
593 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
594
595 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
596
597 if (!after_avail && err == -EAGAIN)
598 break;
599
600 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
601 continue;
602
603 return r;
604 }
605
606 /* Make sure that if these memblocks need to be copied they will fit into one slot */
607 if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
608 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
609
610 if (!after_avail && frames == 0)
611 break;
612
613 pa_assert(frames > 0);
614 after_avail = FALSE;
615
616             /* Check these are multiples of 8 bits */
617             pa_assert((areas[0].first & 7) == 0);
618             pa_assert((areas[0].step & 7) == 0);
619
620 /* We assume a single interleaved memory buffer */
621 pa_assert((areas[0].first >> 3) == 0);
622 pa_assert((areas[0].step >> 3) == u->frame_size);
623
624 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
625
626 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
627 chunk.length = pa_memblock_get_length(chunk.memblock);
628 chunk.index = 0;
629
630 pa_sink_render_into_full(u->sink, &chunk);
631 pa_memblock_unref_fixed(chunk.memblock);
632
633 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
634
635 if (!after_avail && (int) sframes == -EAGAIN)
636 break;
637
638 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
639 continue;
640
641 return r;
642 }
643
644 work_done = TRUE;
645
646 u->write_count += frames * u->frame_size;
647 u->since_start += frames * u->frame_size;
648
649 #ifdef DEBUG_TIMING
650 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
651 #endif
652
653 if ((size_t) frames * u->frame_size >= n_bytes)
654 break;
655
656 n_bytes -= (size_t) frames * u->frame_size;
657 }
658 }
659
660 if (u->use_tsched) {
661 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
662 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
663
664 if (*sleep_usec > process_usec)
665 *sleep_usec -= process_usec;
666 else
667 *sleep_usec = 0;
668 } else
669 *sleep_usec = 0;
670
671 return work_done ? 1 : 0;
672 }
673
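/* snd_pcm_writei()-based fallback path for devices without mmap support;
 * same overall logic as mmap_write(), but rendering into a memchunk
 * first. */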
674 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
675 pa_bool_t work_done = FALSE;
676 pa_usec_t max_sleep_usec = 0, process_usec = 0;
677 size_t left_to_play;
678 unsigned j = 0;
679
680 pa_assert(u);
681 pa_sink_assert_ref(u->sink);
682
683 if (u->use_tsched)
684 hw_sleep_time(u, &max_sleep_usec, &process_usec);
685
686 for (;;) {
687 snd_pcm_sframes_t n;
688 size_t n_bytes;
689 int r;
690 pa_bool_t after_avail = TRUE;
691
692 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
693
694 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
695 continue;
696
697 return r;
698 }
699
700 n_bytes = (size_t) n * u->frame_size;
701 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
702 on_timeout = FALSE;
703
704 if (u->use_tsched)
705
706 /* We won't fill up the playback buffer before at least
707 * half the sleep time is over because otherwise we might
708          * ask for more data from the clients than they expect. We
709 * need to guarantee that clients only have to keep around
710 * a single hw buffer length. */
711
712 if (!polled &&
713 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
714 break;
715
716 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
717
718 if (polled)
719 PA_ONCE_BEGIN {
720 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
721 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
722 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
723 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
724 pa_strnull(dn));
725 pa_xfree(dn);
726 } PA_ONCE_END;
727
728 break;
729 }
730
731 if (++j > 10) {
732 #ifdef DEBUG_TIMING
733 pa_log_debug("Not filling up, because already too many iterations.");
734 #endif
735
736 break;
737 }
738
739 n_bytes -= u->hwbuf_unused;
740 polled = FALSE;
741
742 for (;;) {
743 snd_pcm_sframes_t frames;
744 void *p;
745
746 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
747
748 if (u->memchunk.length <= 0)
749 pa_sink_render(u->sink, n_bytes, &u->memchunk);
750
751 pa_assert(u->memchunk.length > 0);
752
753 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
754
755 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
756 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
757
758 p = pa_memblock_acquire(u->memchunk.memblock);
759 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
760 pa_memblock_release(u->memchunk.memblock);
761
762 if (PA_UNLIKELY(frames < 0)) {
763
764 if (!after_avail && (int) frames == -EAGAIN)
765 break;
766
767 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
768 continue;
769
770 return r;
771 }
772
773 if (!after_avail && frames == 0)
774 break;
775
776 pa_assert(frames > 0);
777 after_avail = FALSE;
778
779 u->memchunk.index += (size_t) frames * u->frame_size;
780 u->memchunk.length -= (size_t) frames * u->frame_size;
781
782 if (u->memchunk.length <= 0) {
783 pa_memblock_unref(u->memchunk.memblock);
784 pa_memchunk_reset(&u->memchunk);
785 }
786
787 work_done = TRUE;
788
789 u->write_count += frames * u->frame_size;
790 u->since_start += frames * u->frame_size;
791
792 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
793
794 if ((size_t) frames * u->frame_size >= n_bytes)
795 break;
796
797 n_bytes -= (size_t) frames * u->frame_size;
798 }
799 }
800
801 if (u->use_tsched) {
802 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
803 process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
804
805 if (*sleep_usec > process_usec)
806 *sleep_usec -= process_usec;
807 else
808 *sleep_usec = 0;
809 } else
810 *sleep_usec = 0;
811
812 return work_done ? 1 : 0;
813 }
814
815 static void update_smoother(struct userdata *u) {
816 snd_pcm_sframes_t delay = 0;
817 int64_t position;
818 int err;
819 pa_usec_t now1 = 0, now2;
820 snd_pcm_status_t *status;
821
822 snd_pcm_status_alloca(&status);
823
824 pa_assert(u);
825 pa_assert(u->pcm_handle);
826
827 /* Let's update the time smoother */
828
829 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
830 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
831 return;
832 }
833
834 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
835 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
836 else {
837 snd_htimestamp_t htstamp = { 0, 0 };
838 snd_pcm_status_get_htstamp(status, &htstamp);
839 now1 = pa_timespec_load(&htstamp);
840 }
841
842 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
843 if (now1 <= 0)
844 now1 = pa_rtclock_now();
845
846 /* check if the time since the last update is bigger than the interval */
847 if (u->last_smoother_update > 0)
848 if (u->last_smoother_update + u->smoother_interval > now1)
849 return;
850
851 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
852
853 if (PA_UNLIKELY(position < 0))
854 position = 0;
855
856 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
857
858 pa_smoother_put(u->smoother, now1, now2);
859
860 u->last_smoother_update = now1;
861 /* exponentially increase the update interval up to the MAX limit */
862     u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
863 }
864
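/* The latency is the audio we handed to ALSA that has not been played
 * yet (according to the smoother), plus whatever still sits in our
 * partially consumed memchunk. */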
865 static pa_usec_t sink_get_latency(struct userdata *u) {
866 pa_usec_t r;
867 int64_t delay;
868 pa_usec_t now1, now2;
869
870 pa_assert(u);
871
872 now1 = pa_rtclock_now();
873 now2 = pa_smoother_get(u->smoother, now1);
874
875 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
876
877 r = delay >= 0 ? (pa_usec_t) delay : 0;
878
879 if (u->memchunk.memblock)
880 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
881
882 return r;
883 }
884
885 static int build_pollfd(struct userdata *u) {
886 pa_assert(u);
887 pa_assert(u->pcm_handle);
888
889 if (u->alsa_rtpoll_item)
890 pa_rtpoll_item_free(u->alsa_rtpoll_item);
891
892 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
893 return -1;
894
895 return 0;
896 }
897
898 /* Called from IO context */
899 static int suspend(struct userdata *u) {
900 pa_assert(u);
901 pa_assert(u->pcm_handle);
902
903 pa_smoother_pause(u->smoother, pa_rtclock_now());
904
905 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
906 * take awfully long with our long buffer sizes today. */
907 snd_pcm_close(u->pcm_handle);
908 u->pcm_handle = NULL;
909
910 if (u->alsa_rtpoll_item) {
911 pa_rtpoll_item_free(u->alsa_rtpoll_item);
912 u->alsa_rtpoll_item = NULL;
913 }
914
915     /* We reset max_rewind/max_request here to make sure that while we
916      * are suspended the old max_request/max_rewind values set before
917      * the suspend cannot influence the per-stream buffer of newly
918      * created streams, without their requirements having any
919      * influence on them. */
920 pa_sink_set_max_rewind_within_thread(u->sink, 0);
921 pa_sink_set_max_request_within_thread(u->sink, 0);
922
923 pa_log_info("Device suspended...");
924
925 return 0;
926 }
927
928 /* Called from IO context */
929 static int update_sw_params(struct userdata *u) {
930 snd_pcm_uframes_t avail_min;
931 int err;
932
933 pa_assert(u);
934
935 /* Use the full buffer if no one asked us for anything specific */
936 u->hwbuf_unused = 0;
937
938 if (u->use_tsched) {
939 pa_usec_t latency;
940
941 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
942 size_t b;
943
944 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
945
946 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
947
948 /* We need at least one sample in our buffer */
949
950 if (PA_UNLIKELY(b < u->frame_size))
951 b = u->frame_size;
952
953 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
954 }
955
956 fix_min_sleep_wakeup(u);
957 fix_tsched_watermark(u);
958 }
959
960 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
961
962     /* We need at least one frame in the used part of the buffer */
963 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
964
965 if (u->use_tsched) {
966 pa_usec_t sleep_usec, process_usec;
967
968 hw_sleep_time(u, &sleep_usec, &process_usec);
969 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
970 }
971
972 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
973
974 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
975 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
976 return err;
977 }
978
979 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
980 if (pa_alsa_pcm_is_hw(u->pcm_handle))
981 pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
982 else {
983 pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
984 pa_sink_set_max_rewind_within_thread(u->sink, 0);
985 }
986
987 return 0;
988 }
989
990 /* Called from IO Context on unsuspend or from main thread when creating sink */
991 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
992 pa_bool_t in_thread)
993 {
994 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
995 &u->sink->sample_spec);
996
997 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
998 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
999
1000 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
1001 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
1002
1003 fix_min_sleep_wakeup(u);
1004 fix_tsched_watermark(u);
1005
1006 if (in_thread)
1007 pa_sink_set_latency_range_within_thread(u->sink,
1008 u->min_latency_ref,
1009 pa_bytes_to_usec(u->hwbuf_size, ss));
1010 else {
1011 pa_sink_set_latency_range(u->sink,
1012 0,
1013 pa_bytes_to_usec(u->hwbuf_size, ss));
1014
1015         /* Work around an assert in pa_sink_set_latency_range_within_thread:
1016            keep track of min_latency and reuse it when
1017            this routine is called from the IO context */
1018 u->min_latency_ref = u->sink->thread_info.min_latency;
1019 }
1020
1021 pa_log_info("Time scheduling watermark is %0.2fms",
1022 (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
1023 }
1024
1025 /* Called from IO context */
1026 static int unsuspend(struct userdata *u) {
1027 pa_sample_spec ss;
1028 int err;
1029 pa_bool_t b, d;
1030 snd_pcm_uframes_t period_size, buffer_size;
1031 char *device_name = NULL;
1032
1033 pa_assert(u);
1034 pa_assert(!u->pcm_handle);
1035
1036 pa_log_info("Trying resume...");
1037
1038 if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
1039 /* Need to open device in NONAUDIO mode */
1040 int len = strlen(u->device_name) + 8;
1041
1042 device_name = pa_xmalloc(len);
1043 pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
1044 }
1045
1046 if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
1047 SND_PCM_NONBLOCK|
1048 SND_PCM_NO_AUTO_RESAMPLE|
1049 SND_PCM_NO_AUTO_CHANNELS|
1050 SND_PCM_NO_AUTO_FORMAT)) < 0) {
1051 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
1052 goto fail;
1053 }
1054
1055 ss = u->sink->sample_spec;
1056 period_size = u->fragment_size / u->frame_size;
1057 buffer_size = u->hwbuf_size / u->frame_size;
1058 b = u->use_mmap;
1059 d = u->use_tsched;
1060
1061 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
1062 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
1063 goto fail;
1064 }
1065
1066 if (b != u->use_mmap || d != u->use_tsched) {
1067 pa_log_warn("Resume failed, couldn't get original access mode.");
1068 goto fail;
1069 }
1070
1071 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
1072 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1073 goto fail;
1074 }
1075
1076 if (period_size*u->frame_size != u->fragment_size ||
1077 buffer_size*u->frame_size != u->hwbuf_size) {
1078 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1079 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
1080 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
1081 goto fail;
1082 }
1083
1084 if (update_sw_params(u) < 0)
1085 goto fail;
1086
1087 if (build_pollfd(u) < 0)
1088 goto fail;
1089
1090 u->write_count = 0;
1091 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1092 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1093 u->last_smoother_update = 0;
1094
1095 u->first = TRUE;
1096 u->since_start = 0;
1097
1098     /* Reset the watermark to the value defined when the sink was created */
1099 if (u->use_tsched)
1100 reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, TRUE);
1101
1102 pa_log_info("Resumed successfully...");
1103
1104 pa_xfree(device_name);
1105 return 0;
1106
1107 fail:
1108 if (u->pcm_handle) {
1109 snd_pcm_close(u->pcm_handle);
1110 u->pcm_handle = NULL;
1111 }
1112
1113 pa_xfree(device_name);
1114
1115 return -PA_ERR_IO;
1116 }
1117
1118 /* Called from IO context */
1119 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1120 struct userdata *u = PA_SINK(o)->userdata;
1121
1122 switch (code) {
1123
1124 case PA_SINK_MESSAGE_GET_LATENCY: {
1125 pa_usec_t r = 0;
1126
1127 if (u->pcm_handle)
1128 r = sink_get_latency(u);
1129
1130 *((pa_usec_t*) data) = r;
1131
1132 return 0;
1133 }
1134
1135 case PA_SINK_MESSAGE_SET_STATE:
1136
1137 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1138
1139 case PA_SINK_SUSPENDED: {
1140 int r;
1141
1142 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1143
1144 if ((r = suspend(u)) < 0)
1145 return r;
1146
1147 break;
1148 }
1149
1150 case PA_SINK_IDLE:
1151 case PA_SINK_RUNNING: {
1152 int r;
1153
1154 if (u->sink->thread_info.state == PA_SINK_INIT) {
1155 if (build_pollfd(u) < 0)
1156 return -PA_ERR_IO;
1157 }
1158
1159 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1160 if ((r = unsuspend(u)) < 0)
1161 return r;
1162 }
1163
1164 break;
1165 }
1166
1167 case PA_SINK_UNLINKED:
1168 case PA_SINK_INIT:
1169 case PA_SINK_INVALID_STATE:
1170 ;
1171 }
1172
1173 break;
1174 }
1175
1176 return pa_sink_process_msg(o, code, data, offset, chunk);
1177 }
1178
1179 /* Called from main context */
1180 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1181 pa_sink_state_t old_state;
1182 struct userdata *u;
1183
1184 pa_sink_assert_ref(s);
1185 pa_assert_se(u = s->userdata);
1186
1187 old_state = pa_sink_get_state(u->sink);
1188
1189 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1190 reserve_done(u);
1191 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1192 if (reserve_init(u, u->device_name) < 0)
1193 return -PA_ERR_BUSY;
1194
1195 return 0;
1196 }
1197
1198 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1199 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1200
1201 pa_assert(u);
1202 pa_assert(u->mixer_handle);
1203
1204 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1205 return 0;
1206
1207 if (!PA_SINK_IS_LINKED(u->sink->state))
1208 return 0;
1209
1210 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1211 return 0;
1212
1213 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1214 pa_sink_get_volume(u->sink, TRUE);
1215 pa_sink_get_mute(u->sink, TRUE);
1216 }
1217
1218 return 0;
1219 }
1220
1221 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1222 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1223
1224 pa_assert(u);
1225 pa_assert(u->mixer_handle);
1226
1227 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1228 return 0;
1229
1230 if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1231 return 0;
1232
1233 if (mask & SND_CTL_EVENT_MASK_VALUE)
1234 pa_sink_update_volume_and_mute(u->sink);
1235
1236 return 0;
1237 }
1238
1239 static void sink_get_volume_cb(pa_sink *s) {
1240 struct userdata *u = s->userdata;
1241 pa_cvolume r;
1242 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1243
1244 pa_assert(u);
1245 pa_assert(u->mixer_path);
1246 pa_assert(u->mixer_handle);
1247
1248 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1249 return;
1250
1251 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1252 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1253
1254 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1255
1256 if (u->mixer_path->has_dB) {
1257 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1258
1259 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1260 }
1261
1262 if (pa_cvolume_equal(&u->hardware_volume, &r))
1263 return;
1264
1265 s->real_volume = u->hardware_volume = r;
1266
1267 /* Hmm, so the hardware volume changed, let's reset our software volume */
1268 if (u->mixer_path->has_dB)
1269 pa_sink_set_soft_volume(s, NULL);
1270 }
1271
1272 static void sink_set_volume_cb(pa_sink *s) {
1273 struct userdata *u = s->userdata;
1274 pa_cvolume r;
1275 char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1276 pa_bool_t deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);
1277
1278 pa_assert(u);
1279 pa_assert(u->mixer_path);
1280 pa_assert(u->mixer_handle);
1281
1282 /* Shift up by the base volume */
1283 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1284
1285 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1286 return;
1287
1288 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1289 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1290
1291 u->hardware_volume = r;
1292
1293 if (u->mixer_path->has_dB) {
1294 pa_cvolume new_soft_volume;
1295 pa_bool_t accurate_enough;
1296 char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1297
1298 /* Match exactly what the user requested by software */
1299 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1300
1301 /* If the adjustment to do in software is only minimal we
1302 * can skip it. That saves us CPU at the expense of a bit of
1303 * accuracy */
1304 accurate_enough =
1305 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1306 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1307
1308 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1309 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1310 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1311 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1312 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1313 pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1314 pa_yes_no(accurate_enough));
1315 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1316
1317 if (!accurate_enough)
1318 s->soft_volume = new_soft_volume;
1319
1320 } else {
1321 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1322
1323 /* We can't match exactly what the user requested, hence let's
1324 * at least tell the user about it */
1325
1326 s->real_volume = r;
1327 }
1328 }
1329
1330 static void sink_write_volume_cb(pa_sink *s) {
1331 struct userdata *u = s->userdata;
1332 pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1333
1334 pa_assert(u);
1335 pa_assert(u->mixer_path);
1336 pa_assert(u->mixer_handle);
1337 pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);
1338
1339 /* Shift up by the base volume */
1340 pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1341
1342 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1343 pa_log_error("Writing HW volume failed");
1344 else {
1345 pa_cvolume tmp_vol;
1346 pa_bool_t accurate_enough;
1347
1348 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1349 pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1350
1351 pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1352 accurate_enough =
1353 (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1354 (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1355
1356 if (!accurate_enough) {
1357 union {
1358 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1359 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1360 } vol;
1361
1362 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1363 pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1364 pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1365 pa_log_debug(" in dB: %s (request) != %s",
1366 pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1367 pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1368 }
1369 }
1370 }
1371
1372 static void sink_get_mute_cb(pa_sink *s) {
1373 struct userdata *u = s->userdata;
1374 pa_bool_t b;
1375
1376 pa_assert(u);
1377 pa_assert(u->mixer_path);
1378 pa_assert(u->mixer_handle);
1379
1380 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1381 return;
1382
1383 s->muted = b;
1384 }
1385
1386 static void sink_set_mute_cb(pa_sink *s) {
1387 struct userdata *u = s->userdata;
1388
1389 pa_assert(u);
1390 pa_assert(u->mixer_path);
1391 pa_assert(u->mixer_handle);
1392
1393 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1394 }
1395
1396 static void mixer_volume_init(struct userdata *u) {
1397 pa_assert(u);
1398
1399 if (!u->mixer_path->has_volume) {
1400 pa_sink_set_write_volume_callback(u->sink, NULL);
1401 pa_sink_set_get_volume_callback(u->sink, NULL);
1402 pa_sink_set_set_volume_callback(u->sink, NULL);
1403
1404 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1405 } else {
1406 pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
1407 pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
1408
1409 if (u->mixer_path->has_dB && u->deferred_volume) {
1410 pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
1411 pa_log_info("Successfully enabled synchronous volume.");
1412 } else
1413 pa_sink_set_write_volume_callback(u->sink, NULL);
1414
1415 if (u->mixer_path->has_dB) {
1416 pa_sink_enable_decibel_volume(u->sink, TRUE);
1417 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1418
1419 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1420 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1421
1422 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1423 } else {
1424 pa_sink_enable_decibel_volume(u->sink, FALSE);
1425 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1426
1427 u->sink->base_volume = PA_VOLUME_NORM;
1428 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1429 }
1430
1431 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1432 }
1433
1434 if (!u->mixer_path->has_mute) {
1435 pa_sink_set_get_mute_callback(u->sink, NULL);
1436 pa_sink_set_set_mute_callback(u->sink, NULL);
1437 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1438 } else {
1439 pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
1440 pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1441 pa_log_info("Using hardware mute control.");
1442 }
1443 }
1444
1445 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1446 struct userdata *u = s->userdata;
1447 pa_alsa_port_data *data;
1448
1449 pa_assert(u);
1450 pa_assert(p);
1451 pa_assert(u->mixer_handle);
1452
1453 data = PA_DEVICE_PORT_DATA(p);
1454
1455 pa_assert_se(u->mixer_path = data->path);
1456 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1457
1458 mixer_volume_init(u);
1459
1460 if (data->setting)
1461 pa_alsa_setting_select(data->setting, u->mixer_handle);
1462
1463 if (s->set_mute)
1464 s->set_mute(s);
1465 if (s->set_volume)
1466 s->set_volume(s);
1467
1468 return 0;
1469 }
1470
1471 static void sink_update_requested_latency_cb(pa_sink *s) {
1472 struct userdata *u = s->userdata;
1473 size_t before;
1474 pa_assert(u);
1475 pa_assert(u->use_tsched); /* only when timer scheduling is used
1476 * we can dynamically adjust the
1477 * latency */
1478
1479 if (!u->pcm_handle)
1480 return;
1481
1482 before = u->hwbuf_unused;
1483 update_sw_params(u);
1484
1485 /* Let's check whether we now use only a smaller part of the
1486        buffer than before. If so, we need to make sure that subsequent
1487 rewinds are relative to the new maximum fill level and not to the
1488 current fill level. Thus, let's do a full rewind once, to clear
1489 things up. */
1490
1491 if (u->hwbuf_unused > before) {
1492 pa_log_debug("Requesting rewind due to latency change.");
1493 pa_sink_request_rewind(s, (size_t) -1);
1494 }
1495 }
1496
1497 static pa_idxset* sink_get_formats(pa_sink *s) {
1498 struct userdata *u = s->userdata;
1499 pa_idxset *ret = pa_idxset_new(NULL, NULL);
1500 pa_format_info *f;
1501 uint32_t idx;
1502
1503 pa_assert(u);
1504
1505 PA_IDXSET_FOREACH(f, u->formats, idx) {
1506 pa_idxset_put(ret, pa_format_info_copy(f), NULL);
1507 }
1508
1509 return ret;
1510 }
1511
1512 static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
1513 struct userdata *u = s->userdata;
1514 pa_format_info *f;
1515 uint32_t idx;
1516
1517 pa_assert(u);
1518
1519 /* FIXME: also validate sample rates against what the device supports */
1520 PA_IDXSET_FOREACH(f, formats, idx) {
1521 if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
1522             /* EAC3 cannot be sent over S/PDIF */
1523 return FALSE;
1524 }
1525
1526 pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
1527 u->formats = pa_idxset_new(NULL, NULL);
1528
1529 /* Note: the logic below won't apply if we're using software encoding.
1530 * This is fine for now since we don't support that via the passthrough
1531 * framework, but this must be changed if we do. */
1532
1533 /* First insert non-PCM formats since we prefer those. */
1534 PA_IDXSET_FOREACH(f, formats, idx) {
1535 if (!pa_format_info_is_pcm(f))
1536 pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
1537 }
1538
1539 /* Now add any PCM formats */
1540 PA_IDXSET_FOREACH(f, formats, idx) {
1541 if (pa_format_info_is_pcm(f))
1542 pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
1543 }
1544
1545 return TRUE;
1546 }
1547
1548 static pa_bool_t sink_update_rate_cb(pa_sink *s, uint32_t rate)
1549 {
1550 struct userdata *u = s->userdata;
1551 pa_assert(u);
1552
1553 if (!PA_SINK_IS_OPENED(s->state)) {
1554 pa_log_info("Updating rate for device %s, new rate is %d",u->device_name, rate);
1555 u->sink->sample_spec.rate = rate;
1556 return TRUE;
1557 }
1558 return FALSE;
1559 }
1560
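/* Rewind the already-written but not-yet-played part of the buffer as
 * far as the sink requested, but always leave at least rewind_safeguard
 * bytes queued so the DMA is not disturbed. */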
1561 static int process_rewind(struct userdata *u) {
1562 snd_pcm_sframes_t unused;
1563 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1564 pa_assert(u);
1565
1566 /* Figure out how much we shall rewind and reset the counter */
1567 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1568
1569 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1570
1571 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1572 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1573 return -1;
1574 }
1575
1576 unused_nbytes = (size_t) unused * u->frame_size;
1577
1578     /* Make sure the rewind doesn't go too far, since that can cause issues with DMAs */
1579 unused_nbytes += u->rewind_safeguard;
1580
1581 if (u->hwbuf_size > unused_nbytes)
1582 limit_nbytes = u->hwbuf_size - unused_nbytes;
1583 else
1584 limit_nbytes = 0;
1585
1586 if (rewind_nbytes > limit_nbytes)
1587 rewind_nbytes = limit_nbytes;
1588
1589 if (rewind_nbytes > 0) {
1590 snd_pcm_sframes_t in_frames, out_frames;
1591
1592 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1593
1594 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1595 pa_log_debug("before: %lu", (unsigned long) in_frames);
1596 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1597 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1598 if (try_recover(u, "process_rewind", out_frames) < 0)
1599 return -1;
1600 out_frames = 0;
1601 }
1602
1603 pa_log_debug("after: %lu", (unsigned long) out_frames);
1604
1605 rewind_nbytes = (size_t) out_frames * u->frame_size;
1606
1607 if (rewind_nbytes <= 0)
1608 pa_log_info("Tried rewind, but was apparently not possible.");
1609 else {
1610 u->write_count -= rewind_nbytes;
1611 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1612 pa_sink_process_rewind(u->sink, rewind_nbytes);
1613
1614 u->after_rewind = TRUE;
1615 return 0;
1616 }
1617 } else
1618 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1619
1620 pa_sink_process_rewind(u->sink, 0);
1621 return 0;
1622 }
1623
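/* IO thread main loop: process rewind requests, render and write audio
 * while the sink is open, then sleep on the rtpoll until either the
 * timer fires or ALSA signals POLLOUT. */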
1624 static void thread_func(void *userdata) {
1625 struct userdata *u = userdata;
1626 unsigned short revents = 0;
1627
1628 pa_assert(u);
1629
1630 pa_log_debug("Thread starting up");
1631
1632 if (u->core->realtime_scheduling)
1633 pa_make_realtime(u->core->realtime_priority);
1634
1635 pa_thread_mq_install(&u->thread_mq);
1636
1637 for (;;) {
1638 int ret;
1639 pa_usec_t rtpoll_sleep = 0;
1640
1641 #ifdef DEBUG_TIMING
1642 pa_log_debug("Loop");
1643 #endif
1644
1645 /* Render some data and write it to the dsp */
1646 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1647 int work_done;
1648 pa_usec_t sleep_usec = 0;
1649 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1650
1651 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
1652 if (process_rewind(u) < 0)
1653 goto fail;
1654
1655 if (u->use_mmap)
1656 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1657 else
1658 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1659
1660 if (work_done < 0)
1661 goto fail;
1662
1663 /* pa_log_debug("work_done = %i", work_done); */
1664
1665 if (work_done) {
1666
1667 if (u->first) {
1668 pa_log_info("Starting playback.");
1669 snd_pcm_start(u->pcm_handle);
1670
1671 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1672
1673 u->first = FALSE;
1674 }
1675
1676 update_smoother(u);
1677 }
1678
1679 if (u->use_tsched) {
1680 pa_usec_t cusec;
1681
1682 if (u->since_start <= u->hwbuf_size) {
1683
1684 /* USB devices on ALSA seem to hit a buffer
1685 * underrun during the first iterations much
1686                  * quicker than we calculate here, probably due to
1687                  * the transport latency. To accommodate that
1688 * we artificially decrease the sleep time until
1689 * we have filled the buffer at least once
1690 * completely.*/
1691
1692 if (pa_log_ratelimit(PA_LOG_DEBUG))
1693 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1694 sleep_usec /= 2;
1695 }
1696
1697 /* OK, the playback buffer is now full, let's
1698 * calculate when to wake up next */
1699 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1700
1701 /* Convert from the sound card time domain to the
1702 * system time domain */
1703 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1704
1705 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1706
1707 /* We don't trust the conversion, so we wake up whatever comes first */
1708 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1709 }
1710
1711 u->after_rewind = FALSE;
1712
1713 }
1714
1715 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1716 pa_usec_t volume_sleep;
1717 pa_sink_volume_change_apply(u->sink, &volume_sleep);
1718 if (volume_sleep > 0) {
1719 if (rtpoll_sleep > 0)
1720 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1721 else
1722 rtpoll_sleep = volume_sleep;
1723 }
1724 }
1725
1726 if (rtpoll_sleep > 0)
1727 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1728 else
1729 pa_rtpoll_set_timer_disabled(u->rtpoll);
1730
1731 /* Hmm, nothing to do. Let's sleep */
1732 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1733 goto fail;
1734
1735 if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
1736 pa_sink_volume_change_apply(u->sink, NULL);
1737
1738 if (ret == 0)
1739 goto finish;
1740
1741 /* Tell ALSA about this and process its response */
1742 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1743 struct pollfd *pollfd;
1744 int err;
1745 unsigned n;
1746
1747 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1748
1749 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1750 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1751 goto fail;
1752 }
1753
1754 if (revents & ~POLLOUT) {
1755 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1756 goto fail;
1757
1758 u->first = TRUE;
1759 u->since_start = 0;
1760 revents = 0;
1761 } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1762 pa_log_debug("Wakeup from ALSA!");
1763
1764 } else
1765 revents = 0;
1766 }
1767
1768 fail:
1769 /* If this was not a regular exit from the loop we have to continue
1770  * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1771 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1772 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1773
1774 finish:
1775 pa_log_debug("Thread shutting down");
1776 }
1777
1778 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1779 const char *n;
1780 char *t;
1781
1782 pa_assert(data);
1783 pa_assert(ma);
1784 pa_assert(device_name);
1785
1786 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1787 pa_sink_new_data_set_name(data, n);
1788 data->namereg_fail = TRUE;
1789 return;
1790 }
1791
1792 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1793 data->namereg_fail = TRUE;
1794 else {
1795 n = device_id ? device_id : device_name;
1796 data->namereg_fail = FALSE;
1797 }
1798
1799 if (mapping)
1800 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1801 else
1802 t = pa_sprintf_malloc("alsa_output.%s", n);
1803
1804 pa_sink_new_data_set_name(data, t);
1805 pa_xfree(t);
1806 }

static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {

    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {

        if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT, u->paths_dir)))
            goto fail;

        pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
    }

    return;

fail:

    if (u->mixer_path_set) {
        pa_alsa_path_set_free(u->mixer_path_set);
        u->mixer_path_set = NULL;
    } else if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
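
/* A sketch of how the two branches above are reached (the module argument
 * value is an assumption, from a typical module-alsa-sink invocation):
 * loading with e.g. "control=Master" passes element="Master" and
 * synthesizes a single output path for that one mixer element, whereas
 * loading through a card profile passes a mapping and probes the whole
 * path set instead. */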


static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_bool_t need_mixer_callback = FALSE;

    pa_assert(u);

    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, so let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;

        PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = TRUE;
        }
    }
    else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}
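
/* The callback split above is worth spelling out: with deferred volume the
 * mixer is polled from the IO thread through u->rtpoll (io_mixer_callback),
 * so hardware volume changes can be timed against the playback stream,
 * while in the non-deferred case mixer events are dispatched from the main
 * loop via an fdlist (ctl_mixer_callback). */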

pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss;
    uint32_t alternate_sample_rate;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, set_formats = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    alternate_sample_rate = m->core->alternate_sample_rate;
    if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
        pa_log("Failed to parse alternate sample rate");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
    if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
        pa_log("Failed to parse rewind_safeguard argument");
        goto fail;
    }

    deferred_volume = m->core->deferred_volume;
    if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
        pa_log("Failed to parse deferred_volume argument.");
        goto fail;
    }

    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->deferred_volume = deferred_volume;
    u->first = TRUE;
    u->rewind_safeguard = rewind_safeguard;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
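
    /* Argument precedence, by way of example (the argument values here are
     * made up): "device_id=0" wins over "device=front:1", and if neither is
     * given we fall back to DEFAULT_DEVICE. The branches further down then
     * decide whether the PCM is opened via an explicit mapping, via profile
     * probing, or by the raw device string. */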

    u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    if (is_iec958(u) || is_hdmi(u))
        set_formats = TRUE;

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse namereg_fail argument.");
        pa_sink_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);
    pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
                          (set_formats ? PA_SINK_SET_FORMATS : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
                                 &u->sink->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse deferred_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
                                 &u->sink->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse deferred_volume_extra_delay parameter");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    if (u->use_tsched)
        u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->update_rate = sink_update_rate_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
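
    /* Worked example for the numbers logged above (all values assumed, not
     * taken from any particular device): with S16LE stereo at 48 kHz the
     * frame size is 4 bytes, so a 25 ms fragment is 1200 frames = 4800
     * bytes; four such fragments give a 19200 byte hardware buffer, and the
     * log line would read "4.0 fragments of size 4800 bytes (25.00ms),
     * buffer size is 19200 bytes (100.00ms)". */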

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind for device %s", u->device_name);
        pa_sink_set_max_rewind(u->sink, 0);
    }

    if (u->use_tsched) {
        u->tsched_watermark_ref = tsched_watermark;
        reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
        u->sink->write_volume(u->sink);

    if (set_formats) {
        /* For S/PDIF and HDMI, allow getting/setting custom formats */
        pa_format_info *format;

        /* To start with, we only support PCM formats. Other formats may be added
         * with pa_sink_set_formats(). */
        format = pa_format_info_new();
        format->encoding = PA_ENCODING_PCM;
        u->formats = pa_idxset_new(NULL, NULL);
        pa_idxset_put(u->formats, format, NULL);

        u->sink->get_formats = sink_get_formats;
        u->sink->set_formats = sink_set_formats;
    }
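
    /* For instance (purely illustrative, mirroring the PCM entry above), a
     * client could later enable AC3 passthrough on such a sink by adding a
     * second format entry whose encoding is PA_ENCODING_AC3_IEC61937; the
     * sink itself starts out advertising PCM only. */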

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}

static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    if (u->formats)
        pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u->paths_dir);
    pa_xfree(u);
}

void pa_alsa_sink_free(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    userdata_free(u);
}