/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>

#include <asoundlib.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/rtclock.h>
#include <pulse/timeval.h>
#include <pulse/volume.h>
#include <pulse/xmalloc.h>
#include <pulse/internal.h>

#include <pulsecore/core.h>
#include <pulsecore/i18n.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/time-smoother.h>

#include <modules/reserve-wrap.h>

#include "alsa-util.h"
#include "alsa-sink.h"

/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s    -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms  -- Fill up when only this much is left in the buffer */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms   -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s   -- How long after a dropout to recheck whether things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms   -- If the buffer level ever falls below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold during the verification time, decrease the watermark */

/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
 * will increase the watermark only if we hit a real underrun. */
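
/* As an illustration with the defaults above: the sink starts with a
 * 2 s hardware buffer and a 20 ms wakeup watermark. Each underrun
 * bumps the watermark up by 10 ms (or doubles it, whichever increase
 * is smaller); after 20 s during which the buffer level never dropped
 * below 100 ms, the watermark is lowered again in 5 ms steps. */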

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms  -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms   -- Wake up at least this long before the buffer runs empty */

#define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC)                  /* 10s   -- smoother window size */
#define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC)                   /* 1s    -- smoother adjust time */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms   -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms -- max smoother update interval */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100)  /* Don't require volume adjustments to be perfectly correct; don't necessarily extend granularity in software unless the difference gets greater than this level */

#define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
#define DEFAULT_REWIND_SAFEGUARD_USEC (1330)  /* 1.33ms; depending on channels/rate/sample format we may rewind more than the 256 bytes above */
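
/* Example: at 48 kHz with 2ch S16 samples a frame is 4 bytes, so the
 * 256 byte safeguard covers 64 frames, i.e. 64/48000 s = 1.33 ms --
 * which is why the two defaults above coincide at that rate. */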

struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;

    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    uint32_t old_rate;

    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold,
        rewind_safeguard;

    pa_usec_t watermark_dec_not_before;

    pa_memchunk memchunk;

    char *device_name;    /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, sync_volume:1;

    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    pa_smoother *smoother;
    uint64_t write_count;
    uint64_t since_start;
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    pa_idxset *formats;

    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};

static void userdata_free(struct userdata *u);

/* FIXME: Is there a better way to do this than device names? */
static pa_bool_t is_iec958(struct userdata *u) {
    return (strncmp("iec958", u->device_name, 6) == 0);
}

static pa_bool_t is_hdmi(struct userdata *u) {
    return (strncmp("hdmi", u->device_name, 4) == 0);
}
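
/* These are prefix matches, so they catch ALSA device strings such as
 * "iec958:CARD=0" or "hdmi:CARD=1,DEV=0". */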

static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
    pa_assert(r);
    pa_assert(u);

    if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
        return PA_HOOK_CANCEL;

    return PA_HOOK_OK;
}

static void reserve_done(struct userdata *u) {
    pa_assert(u);

    if (u->reserve_slot) {
        pa_hook_slot_free(u->reserve_slot);
        u->reserve_slot = NULL;
    }

    if (u->reserve) {
        pa_reserve_wrapper_unref(u->reserve);
        u->reserve = NULL;
    }
}

static void reserve_update(struct userdata *u) {
    const char *description;
    pa_assert(u);

    if (!u->sink || !u->reserve)
        return;

    if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
        pa_reserve_wrapper_set_application_device_name(u->reserve, description);
}

static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}

static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
    pa_bool_t b;

    pa_assert(w);
    pa_assert(u);

    b = PA_PTR_TO_UINT(busy) && !u->reserve;

    pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
    return PA_HOOK_OK;
}

static void monitor_done(struct userdata *u) {
    pa_assert(u);

    if (u->monitor_slot) {
        pa_hook_slot_free(u->monitor_slot);
        u->monitor_slot = NULL;
    }

    if (u->monitor) {
        pa_reserve_monitor_wrapper_unref(u->monitor);
        u->monitor = NULL;
    }
}

static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}

static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}

static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;
}
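
/* Taken together, the two helpers above clamp the watermark into the
 * range [min_wakeup, max_use - min_sleep]. E.g. with a fully usable
 * 2 s buffer, min_sleep = 10 ms and min_wakeup = 4 ms, the watermark
 * can move between 4 ms and 1990 ms worth of bytes. */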

static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
    }

    /* When we reach this we're officially fucked! */
}

static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    if (u->watermark_dec_not_before <= 0)
        goto restart;

    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);

    /* We don't change the latency range */

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}

static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
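
/* Example: with a requested latency of 2 s and a 20 ms watermark this
 * yields sleep_usec = 1.98 s and process_usec = 20 ms -- i.e. sleep
 * until the buffer is roughly down to the watermark, then refill. */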

static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if (err == -ESTRPIPE)
        pa_log_debug("%s: System suspended!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, pa_alsa_strerror(err));
        return -1;
    }

    u->first = TRUE;
    u->since_start = 0;
    return 0;
}

static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_play;
    pa_bool_t underrun = FALSE;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit(PA_LOG_INFO))
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = FALSE;

                /* We decrease the watermark only if we have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}
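
/* Example: with a 2 s hardware buffer, if snd_pcm_avail() reports
 * 1.9 s worth of writable bytes then 100 ms are left to play; if it
 * reports more than the whole buffer size we have already underrun. */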

static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bits */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if (!after_avail && (int) sframes == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec, FALSE)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
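
/* In effect the smoother is fed pairs of (system time, playback time),
 * where playback time = bytes written minus what still sits in the hw
 * buffer (the delay), converted to usec. The update interval backs off
 * exponentially from SMOOTHER_MIN_INTERVAL up to the 200 ms cap
 * defined above. */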

static pa_usec_t sink_get_latency(struct userdata *u) {
    pa_usec_t r;
    int64_t delay;
    pa_usec_t now1, now2;

    pa_assert(u);

    now1 = pa_rtclock_now();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;

    r = delay >= 0 ? (pa_usec_t) delay : 0;

    if (u->memchunk.memblock)
        r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);

    return r;
}
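
/* I.e. latency = (all bytes written so far, as time) - (smoothed
 * playback position) + (whatever still sits in our partial memchunk).
 * For example, 10.000 s written vs. 9.980 s played back yields 20 ms. */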

static int build_pollfd(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
        return -1;

    return 0;
}

/* Called from IO context */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    /* We reset max_rewind/max_request here so that, while we are
     * suspended, the old values set before the suspend cannot
     * influence the per-stream buffers of newly created streams. */
    pa_sink_set_max_rewind_within_thread(u->sink, 0);
    pa_sink_set_max_request_within_thread(u->sink, 0);

    pa_log_info("Device suspended...");

    return 0;
}

/* Called from IO context */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
        pa_sink_set_max_rewind_within_thread(u->sink, 0);
    }

    return 0;
}
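
/* Example: in tsched mode with hwbuf_unused = 0 and a computed sleep
 * time of 1.98 s, avail_min comes out as one frame plus 1.98 s worth
 * of frames -- the timer does the real wakeup work here and poll()
 * merely acts as a safety net. */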

/* Called from IO context */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;
    char *device_name = NULL;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
        /* Need to open device in NONAUDIO mode */
        int len = strlen(u->device_name) + 8;

        device_name = pa_xmalloc(len);
        pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
    }

    if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New: %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    pa_xfree(device_name);
    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    pa_xfree(device_name);

    return -PA_ERR_IO;
}

/* Called from IO context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_FINISH_MOVE:
        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(data);
            int r = 0;

            if (PA_LIKELY(!pa_sink_input_is_passthrough(i)))
                break;

            u->old_rate = u->sink->sample_spec.rate;

            /* Passthrough format, see if we need to reset sink sample rate */
            if (u->sink->sample_spec.rate == i->thread_info.sample_spec.rate)
                break;

            /* ... we do */
            if ((r = suspend(u)) < 0)
                return r;

            u->sink->sample_spec.rate = i->thread_info.sample_spec.rate;

            if ((r = unsuspend(u)) < 0)
                return r;

            break;
        }

        case PA_SINK_MESSAGE_START_MOVE:
        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(data);
            int r = 0;

            if (PA_LIKELY(!pa_sink_input_is_passthrough(i)))
                break;

            /* Passthrough format, see if we need to reset sink sample rate */
            if (u->sink->sample_spec.rate == u->old_rate)
                break;

            /* ... we do */
            if (PA_SINK_IS_OPENED(u->sink->thread_info.state) && ((r = suspend(u)) < 0))
                return r;

            u->sink->sample_spec.rate = u->old_rate;

            if (PA_SINK_IS_OPENED(u->sink->thread_info.state) && ((r = unsuspend(u)) < 0))
                return r;

            break;
        }

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED: {
                    int r;

                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING: {
                    int r;

                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}

/* Called from main context */
static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
    pa_sink_state_t old_state;
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    old_state = pa_sink_get_state(u->sink);

    if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
        reserve_done(u);
    else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
        if (reserve_init(u, u->device_name) < 0)
            return -PA_ERR_BUSY;

    return 0;
}

static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_sink_get_volume(u->sink, TRUE);
        pa_sink_get_mute(u->sink, TRUE);
    }

    return 0;
}

static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE)
        pa_sink_update_volume_and_mute(u->sink);

    return 0;
}

static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

    if (u->mixer_path->has_dB) {
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
    }

    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_sink_set_soft_volume(s, NULL);
}

static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    pa_bool_t sync_volume = !!(s->flags & PA_SINK_SYNC_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, sync_volume, !sync_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
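
/* Example of the split: if the user asks for 23% but the nearest
 * hardware step is 25%, the hardware is set to 25% and the software
 * volume becomes 23%/25% = 92% -- unless that correction factor lies
 * within VOLUME_ACCURACY of 100%, in which case it is skipped. */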

static void sink_write_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SINK_SYNC_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

            pa_log_debug("Written HW volume did not match the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug(" in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}

static void sink_get_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_bool_t b;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
        return;

    s->muted = b;
}

static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}

static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        pa_sink_set_write_volume_callback(u->sink, NULL);
        pa_sink_set_get_volume_callback(u->sink, NULL);
        pa_sink_set_set_volume_callback(u->sink, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
        pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);

        if (u->mixer_path->has_dB && u->sync_volume) {
            pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
            pa_log_info("Successfully enabled synchronous volume.");
        } else
            pa_sink_set_write_volume_callback(u->sink, NULL);

        if (u->mixer_path->has_dB) {
            pa_sink_enable_decibel_volume(u->sink, TRUE);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
        } else {
            pa_sink_enable_decibel_volume(u->sink, FALSE);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_sink_set_get_mute_callback(u->sink, NULL);
        pa_sink_set_set_mute_callback(u->sink, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
        pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}
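
/* Example of the base volume: if the mixer's dB range tops out at
 * +6 dB, base_volume becomes pa_sw_volume_from_dB(-6.0), so the
 * hardware maximum maps to PA_VOLUME_NORM (100%) and base_volume
 * marks the point on that scale where the device plays at 0 dB. */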

static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    mixer_volume_init(u);

    if (data->setting)
        pa_alsa_setting_select(data->setting, u->mixer_handle);

    if (s->set_mute)
        s->set_mute(s);
    if (s->set_volume)
        s->set_volume(s);

    return 0;
}

static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    size_t before;
    pa_assert(u);
    pa_assert(u->use_tsched); /* only when timer scheduling is used
                               * can we dynamically adjust the
                               * latency */

    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
       buffer than before. If so, we need to make sure that subsequent
       rewinds are relative to the new maximum fill level and not to the
       current fill level. Thus, let's do a full rewind once, to clear
       things up. */

    if (u->hwbuf_unused > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}

static pa_idxset* sink_get_formats(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_idxset *ret = pa_idxset_new(NULL, NULL);
    pa_format_info *f;
    uint32_t idx;

    pa_assert(u);

    PA_IDXSET_FOREACH(f, u->formats, idx) {
        pa_idxset_put(ret, pa_format_info_copy(f), NULL);
    }

    return ret;
}

static pa_bool_t sink_set_formats(pa_sink *s, pa_idxset *formats) {
    struct userdata *u = s->userdata;
    pa_format_info *f;
    uint32_t idx;

    pa_assert(u);

    /* FIXME: also validate sample rates against what the device supports */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
            /* EAC3 cannot be sent over S/PDIF */
            return FALSE;
    }

    pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
    u->formats = pa_idxset_new(NULL, NULL);

    PA_IDXSET_FOREACH(f, formats, idx) {
        pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
    }

    return TRUE;
}

static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
        return -1;
    }

    unused_nbytes = (size_t) unused * u->frame_size;

    /* make sure rewind doesn't go too far, can cause issues with DMAs */
    unused_nbytes += u->rewind_safeguard;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
            if (try_recover(u, "process_rewind", out_frames) < 0)
                return -1;
            out_frames = 0;
        }

        pa_log_debug("after: %lu", (unsigned long) out_frames);

        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= rewind_nbytes;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    pa_sink_process_rewind(u->sink, 0);
    return 0;
}
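
/* Example of the limit: with a 2 s buffer of which 0.5 s have not
 * been played yet, "unused" covers 1.5 s worth of bytes; adding the
 * safeguard means at most 0.5 s minus the safeguard worth of bytes
 * may be rewound, no matter how much the sink asked for. */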

static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

                    u->first = FALSE;
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To accommodate that we
                     * artificially decrease the sleep time until we
                     * have filled the buffer at least once
                     * completely. */

                    if (pa_log_ratelimit(PA_LOG_DEBUG))
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }

            u->after_rewind = FALSE;

        }

        if (u->sink->flags & PA_SINK_SYNC_VOLUME) {
            pa_usec_t volume_sleep;
            pa_sink_volume_change_apply(u->sink, &volume_sleep);
            if (volume_sleep > 0)
                rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
        }

        if (rtpoll_sleep > 0)
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        if (u->sink->flags & PA_SINK_SYNC_VOLUME)
            pa_sink_volume_change_apply(u->sink, NULL);

        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was not a regular exit from the loop we have to continue
     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}

static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
    const char *n;
    char *t;

    pa_assert(data);
    pa_assert(ma);
    pa_assert(device_name);

    if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
        pa_sink_new_data_set_name(data, n);
        data->namereg_fail = TRUE;
        return;
    }

    if ((n = pa_modargs_get_value(ma, "name", NULL)))
        data->namereg_fail = TRUE;
    else {
        n = device_id ? device_id : device_name;
        data->namereg_fail = FALSE;
    }

    if (mapping)
        t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
    else
        t = pa_sprintf_malloc("alsa_output.%s", n);

    pa_sink_new_data_set_name(data, t);
    pa_xfree(t);
}

static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {

    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {

        if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
    }

    return;

fail:

    if (u->mixer_path_set) {
        pa_alsa_path_set_free(u->mixer_path_set);
        u->mixer_path_set = NULL;
    } else if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}

1831 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1832 pa_bool_t need_mixer_callback = FALSE;
1833
1834 pa_assert(u);
1835
1836 if (!u->mixer_handle)
1837 return 0;
1838
1839 if (u->sink->active_port) {
1840 pa_alsa_port_data *data;
1841
1842 /* We have a list of supported paths, so let's activate the
1843 * one that has been chosen as active */
1844
1845 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1846 u->mixer_path = data->path;
1847
1848 pa_alsa_path_select(data->path, u->mixer_handle);
1849
1850 if (data->setting)
1851 pa_alsa_setting_select(data->setting, u->mixer_handle);
1852
1853 } else {
1854
1855 if (!u->mixer_path && u->mixer_path_set)
1856 u->mixer_path = u->mixer_path_set->paths;
1857
1858 if (u->mixer_path) {
1859 /* Hmm, we have only a single path, then let's activate it */
1860
1861 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1862
1863 if (u->mixer_path->settings)
1864 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1865 } else
1866 return 0;
1867 }
1868
1869 mixer_volume_init(u);
1870
1871 /* Will we need to register callbacks? */
1872 if (u->mixer_path_set && u->mixer_path_set->paths) {
1873 pa_alsa_path *p;
1874
1875 PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
1876 if (p->has_volume || p->has_mute)
1877 need_mixer_callback = TRUE;
1878 }
1879 }
1880 else if (u->mixer_path)
1881 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1882
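/* With PA_SINK_SYNC_VOLUME the volume is written from the IO thread, so
 * mixer events have to be polled there via the rtpoll; otherwise they
 * are dispatched from the core main loop through an fd list. */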
1883 if (need_mixer_callback) {
1884 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1885 if (u->sink->flags & PA_SINK_SYNC_VOLUME) {
1886 u->mixer_pd = pa_alsa_mixer_pdata_new();
1887 mixer_callback = io_mixer_callback;
1888
1889 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1890 pa_log("Failed to initialize file descriptor monitoring");
1891 return -1;
1892 }
1893 } else {
1894 u->mixer_fdl = pa_alsa_fdlist_new();
1895 mixer_callback = ctl_mixer_callback;
1896
1897 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1898 pa_log("Failed to initialize file descriptor monitoring");
1899 return -1;
1900 }
1901 }
1902
1903 if (u->mixer_path_set)
1904 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1905 else
1906 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1907 }
1908
1909 return 0;
1910 }
1911
1912 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
1913
1914 struct userdata *u = NULL;
1915 const char *dev_id = NULL;
1916 pa_sample_spec ss, requested_ss;
1917 pa_channel_map map;
1918 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
1919 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1920 size_t frame_size;
1921 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, sync_volume = FALSE, set_formats = FALSE;
1922 pa_sink_new_data data;
1923 pa_alsa_profile_set *profile_set = NULL;
1924
1925 pa_assert(m);
1926 pa_assert(ma);
1927
1928 ss = m->core->default_sample_spec;
1929 map = m->core->default_channel_map;
1930 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1931 pa_log("Failed to parse sample specification and channel map");
1932 goto fail;
1933 }
1934
1935 requested_ss = ss;
1936 frame_size = pa_frame_size(&ss);
1937
1938 nfrags = m->core->default_n_fragments;
1939 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1940 if (frag_size == 0)
1941 frag_size = (uint32_t) frame_size;
1942 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1943 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1944
1945 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1946 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1947 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1948 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1949 pa_log("Failed to parse buffer metrics");
1950 goto fail;
1951 }
1952
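/* The values parsed above correspond to module options such as
 * (illustrative invocation only):
 * load-module module-alsa-sink device=hw:0 fragments=4 fragment_size=1024 */
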
1953 buffer_size = nfrags * frag_size;
1954
1955 period_frames = frag_size/frame_size;
1956 buffer_frames = buffer_size/frame_size;
1957 tsched_frames = tsched_size/frame_size;
1958
1959 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1960 pa_log("Failed to parse mmap argument.");
1961 goto fail;
1962 }
1963
1964 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1965 pa_log("Failed to parse tsched argument.");
1966 goto fail;
1967 }
1968
1969 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1970 pa_log("Failed to parse ignore_dB argument.");
1971 goto fail;
1972 }
1973
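/* rewind_safeguard limits how closely we rewind towards the current
 * hardware read pointer; the default is the larger of the byte and time
 * based constants, presumably to stay clear of inaccurate DMA pointer
 * reports. */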
1974 rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
1975 if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
1976 pa_log("Failed to parse rewind_safeguard argument");
1977 goto fail;
1978 }
1979
1980 sync_volume = m->core->sync_volume;
1981 if (pa_modargs_get_value_boolean(ma, "sync_volume", &sync_volume) < 0) {
1982 pa_log("Failed to parse sync_volume argument.");
1983 goto fail;
1984 }
1985
1986 use_tsched = pa_alsa_may_tsched(use_tsched);
1987
1988 u = pa_xnew0(struct userdata, 1);
1989 u->core = m->core;
1990 u->module = m;
1991 u->use_mmap = use_mmap;
1992 u->use_tsched = use_tsched;
1993 u->sync_volume = sync_volume;
1994 u->first = TRUE;
1995 u->rewind_safeguard = rewind_safeguard;
1996 u->rtpoll = pa_rtpoll_new();
1997 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1998
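/* The smoother turns jittery hardware timing reports into a steady clock
 * estimate. The arguments below follow pa_smoother_new() as declared in
 * pulsecore/time-smoother.h: adjust time, history window, monotonic,
 * smoothing, minimum history entries (5), epoch, and start paused. */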
1999 u->smoother = pa_smoother_new(
2000 SMOOTHER_ADJUST_USEC,
2001 SMOOTHER_WINDOW_USEC,
2002 TRUE,
2003 TRUE,
2004 5,
2005 pa_rtclock_now(),
2006 TRUE);
2007 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
2008
2009 dev_id = pa_modargs_get_value(
2010 ma, "device_id",
2011 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
2012
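/* Reserve the device via the session-wide device reservation scheme
 * (see modules/reserve-wrap.h) and additionally monitor whether another
 * application asks us to give it up. */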
2013 if (reserve_init(u, dev_id) < 0)
2014 goto fail;
2015
2016 if (reserve_monitor_init(u, dev_id) < 0)
2017 goto fail;
2018
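/* b and d are in/out flags: they request mmap and timer-based scheduling
 * support, and after the open call below they tell us what the device
 * actually granted. */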
2019 b = use_mmap;
2020 d = use_tsched;
2021
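/* Three ways to open the PCM: with an explicit mapping handed to us
 * (typically by module-alsa-card), with just a device_id= for which we
 * probe a full profile set, or with a plain device= string. */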
2022 if (mapping) {
2023
2024 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2025 pa_log("device_id= not set");
2026 goto fail;
2027 }
2028
2029 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
2030 dev_id,
2031 &u->device_name,
2032 &ss, &map,
2033 SND_PCM_STREAM_PLAYBACK,
2034 &period_frames, &buffer_frames, tsched_frames,
2035 &b, &d, mapping)))
2036 goto fail;
2037
2038 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2039
2040 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
2041 goto fail;
2042
2043 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
2044 dev_id,
2045 &u->device_name,
2046 &ss, &map,
2047 SND_PCM_STREAM_PLAYBACK,
2048 &period_frames, &buffer_frames, tsched_frames,
2049 &b, &d, profile_set, &mapping)))
2050 goto fail;
2051
2052 } else {
2053
2054 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
2055 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
2056 &u->device_name,
2057 &ss, &map,
2058 SND_PCM_STREAM_PLAYBACK,
2059 &period_frames, &buffer_frames, tsched_frames,
2060 &b, &d, FALSE)))
2061 goto fail;
2062 }
2063
2064 pa_assert(u->device_name);
2065 pa_log_info("Successfully opened device %s.", u->device_name);
2066
2067 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2068 pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
2069 goto fail;
2070 }
2071
2072 if (mapping)
2073 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
2074
2075 if (use_mmap && !b) {
2076 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2077 u->use_mmap = use_mmap = FALSE;
2078 }
2079
2080 if (use_tsched && (!b || !d)) {
2081 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2082 u->use_tsched = use_tsched = FALSE;
2083 }
2084
2085 if (u->use_mmap)
2086 pa_log_info("Successfully enabled mmap() mode.");
2087
2088 if (u->use_tsched)
2089 pa_log_info("Successfully enabled timer-based scheduling mode.");
2090
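/* Digital outputs (IEC958/HDMI) may later be asked to carry non-PCM
 * encodings, so flag them as supporting custom format negotiation; the
 * actual format list is set up further below. */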
2091 if (is_iec958(u) || is_hdmi(u))
2092 set_formats = TRUE;
2093
2094 /* ALSA might tweak the sample spec, so recalculate the frame size */
2095 frame_size = pa_frame_size(&ss);
2096
2097 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2098
2099 pa_sink_new_data_init(&data);
2100 data.driver = driver;
2101 data.module = m;
2102 data.card = card;
2103 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
2104
2105 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2106 * variable instead of using &data.namereg_fail directly, because
2107 * data.namereg_fail is a bitfield and taking the address of a bitfield
2108 * variable is impossible. */
2109 namereg_fail = data.namereg_fail;
2110 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2111 pa_log("Failed to parse namereg_fail argument.");
2112 pa_sink_new_data_done(&data);
2113 goto fail;
2114 }
2115 data.namereg_fail = namereg_fail;
2116
2117 pa_sink_new_data_set_sample_spec(&data, &ss);
2118 pa_sink_new_data_set_channel_map(&data, &map);
2119
2120 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2121 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2122 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2123 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2124 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2125
2126 if (mapping) {
2127 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2128 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2129 }
2130
2131 pa_alsa_init_description(data.proplist);
2132
2133 if (u->control_device)
2134 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2135
2136 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2137 pa_log("Invalid properties");
2138 pa_sink_new_data_done(&data);
2139 goto fail;
2140 }
2141
2142 if (u->mixer_path_set)
2143 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
2144
2145 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
2146 (set_formats ? PA_SINK_SET_FORMATS : 0));
2147 pa_sink_new_data_done(&data);
2148
2149 if (!u->sink) {
2150 pa_log("Failed to create sink object");
2151 goto fail;
2152 }
2153
2154 if (pa_modargs_get_value_u32(ma, "sync_volume_safety_margin",
2155 &u->sink->thread_info.volume_change_safety_margin) < 0) {
2156 pa_log("Failed to parse sync_volume_safety_margin parameter");
2157 goto fail;
2158 }
2159
2160 if (pa_modargs_get_value_s32(ma, "sync_volume_extra_delay",
2161 &u->sink->thread_info.volume_change_extra_delay) < 0) {
2162 pa_log("Failed to parse sync_volume_extra_delay parameter");
2163 goto fail;
2164 }
2165
2166 u->sink->parent.process_msg = sink_process_msg;
2167 if (u->use_tsched)
2168 u->sink->update_requested_latency = sink_update_requested_latency_cb;
2169 u->sink->set_state = sink_set_state_cb;
2170 u->sink->set_port = sink_set_port_cb;
2171 u->sink->userdata = u;
2172
2173 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2174 pa_sink_set_rtpoll(u->sink, u->rtpoll);
2175
2176 u->frame_size = frame_size;
2177 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2178 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2179 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2180
2181 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2182 (double) u->hwbuf_size / (double) u->fragment_size,
2183 (long unsigned) u->fragment_size,
2184 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2185 (long unsigned) u->hwbuf_size,
2186 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2187
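/* A single hardware buffer bounds both request sizes and rewinds;
 * rewinding is only enabled on real hw devices, since plugin devices
 * tend not to support it reliably. */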
2188 pa_sink_set_max_request(u->sink, u->hwbuf_size);
2189 if (pa_alsa_pcm_is_hw(u->pcm_handle))
2190 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2191 else {
2192 pa_log_info("Disabling rewind for device %s", u->device_name);
2193 pa_sink_set_max_rewind(u->sink, 0);
2194 }
2195
2196 if (u->use_tsched) {
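/* Convert the usec-based watermark defaults into bytes: the
 * user-supplied watermark is interpreted against the originally
 * requested sample spec, the step/threshold values against the spec the
 * device actually accepted. */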
2197 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
2198
2199 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
2200 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
2201
2202 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
2203 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
2204
2205 fix_min_sleep_wakeup(u);
2206 fix_tsched_watermark(u);
2207
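/* With timer-based scheduling the latency is dynamic, anywhere from 0 up
 * to the full hardware buffer time; without it the sink is pinned to the
 * fixed buffer latency set in the else branch below. */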
2208 pa_sink_set_latency_range(u->sink,
2209 0,
2210 pa_bytes_to_usec(u->hwbuf_size, &ss));
2211
2212 pa_log_info("Time scheduling watermark is %0.2fms",
2213 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
2214 } else
2215 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
2216
2217 reserve_update(u);
2218
2219 if (update_sw_params(u) < 0)
2220 goto fail;
2221
2222 if (setup_mixer(u, ignore_dB) < 0)
2223 goto fail;
2224
2225 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2226
2227 if (!(u->thread = pa_thread_new("alsa-sink", thread_func, u))) {
2228 pa_log("Failed to create thread.");
2229 goto fail;
2230 }
2231
2232 /* Get initial mixer settings */
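/* If the caller specified a volume/mute, push it to the hardware;
 * otherwise read back whatever the hardware is currently set to. */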
2233 if (data.volume_is_set) {
2234 if (u->sink->set_volume)
2235 u->sink->set_volume(u->sink);
2236 } else {
2237 if (u->sink->get_volume)
2238 u->sink->get_volume(u->sink);
2239 }
2240
2241 if (data.muted_is_set) {
2242 if (u->sink->set_mute)
2243 u->sink->set_mute(u->sink);
2244 } else {
2245 if (u->sink->get_mute)
2246 u->sink->get_mute(u->sink);
2247 }
2248
2249 if ((data.volume_is_set || data.muted_is_set) && u->sink->write_volume)
2250 u->sink->write_volume(u->sink);
2251
2252 if (set_formats) {
2253 /* For S/PDIF and HDMI, allow getting/setting custom formats */
2254 pa_format_info *format;
2255
2256 /* To start with, we only support PCM formats. Other formats may be added
2257 * with pa_sink_set_formats(). */
2258 format = pa_format_info_new();
2259 format->encoding = PA_ENCODING_PCM;
2260 u->formats = pa_idxset_new(NULL, NULL);
2261 pa_idxset_put(u->formats, format, NULL);
2262
2263 u->sink->get_formats = sink_get_formats;
2264 u->sink->set_formats = sink_set_formats;
2265 }
2266
2267 pa_sink_put(u->sink);
2268
2269 if (profile_set)
2270 pa_alsa_profile_set_free(profile_set);
2271
2272 return u->sink;
2273
2274 fail:
2275
2276 if (u)
2277 userdata_free(u);
2278
2279 if (profile_set)
2280 pa_alsa_profile_set_free(profile_set);
2281
2282 return NULL;
2283 }
2284
2285 static void userdata_free(struct userdata *u) {
2286 pa_assert(u);
2287
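/* Teardown mirrors setup in reverse: unlink the sink so no new data is
 * routed to it, shut down the IO thread, and only then release the
 * resources the thread may have been using. */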
2288 if (u->sink)
2289 pa_sink_unlink(u->sink);
2290
2291 if (u->thread) {
2292 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2293 pa_thread_free(u->thread);
2294 }
2295
2296 pa_thread_mq_done(&u->thread_mq);
2297
2298 if (u->sink)
2299 pa_sink_unref(u->sink);
2300
2301 if (u->memchunk.memblock)
2302 pa_memblock_unref(u->memchunk.memblock);
2303
2304 if (u->mixer_pd)
2305 pa_alsa_mixer_pdata_free(u->mixer_pd);
2306
2307 if (u->alsa_rtpoll_item)
2308 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2309
2310 if (u->rtpoll)
2311 pa_rtpoll_free(u->rtpoll);
2312
2313 if (u->pcm_handle) {
2314 snd_pcm_drop(u->pcm_handle);
2315 snd_pcm_close(u->pcm_handle);
2316 }
2317
2318 if (u->mixer_fdl)
2319 pa_alsa_fdlist_free(u->mixer_fdl);
2320
2321 if (u->mixer_path_set)
2322 pa_alsa_path_set_free(u->mixer_path_set);
2323 else if (u->mixer_path)
2324 pa_alsa_path_free(u->mixer_path);
2325
2326 if (u->mixer_handle)
2327 snd_mixer_close(u->mixer_handle);
2328
2329 if (u->smoother)
2330 pa_smoother_free(u->smoother);
2331
2332 if (u->formats)
2333 pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2334
2335 reserve_done(u);
2336 monitor_done(u);
2337
2338 pa_xfree(u->device_name);
2339 pa_xfree(u->control_device);
2340 pa_xfree(u);
2341 }
2342
2343 void pa_alsa_sink_free(pa_sink *s) {
2344 struct userdata *u;
2345
2346 pa_sink_assert_ref(s);
2347 pa_assert_se(u = s->userdata);
2348
2349 userdata_free(u);
2350 }