]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
Merge commit 'origin/master-tx'
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
66 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
67 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
68 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
69 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
70
/* Per-sink instance state, shared between the main thread and the
 * dedicated IO thread that feeds the ALSA device. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* IO thread plus the message queue and realtime poll loop used to
     * communicate with it. */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;            /* ALSA playback handle; NULL while suspended */

    /* Mixer state used for hardware volume/mute control. */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;       /* last volume read from / written to the hardware mixer */

    /* All of the following sizes are in bytes. */
    size_t
        frame_size,                   /* size of one audio frame */
        fragment_size,                /* size of one period/fragment */
        hwbuf_size,                   /* total hardware buffer size */
        tsched_watermark,             /* refill when only this much is left (timer scheduling) */
        hwbuf_unused,                 /* part of the hw buffer we deliberately leave unused */
        min_sleep,                    /* lower bound on sleep per iteration */
        min_wakeup,                   /* wake up at least this early before the buffer drains */
        watermark_step;               /* how much to grow the watermark on underrun */

    unsigned nfragments;              /* number of fragments/periods configured */
    pa_memchunk memchunk;             /* staging chunk kept across unix_write() calls */

    char *device_name;                /* name of the PCM device */
    char *control_device;             /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1;

    /* first: set right after (un)suspend until the first write went through;
     * after_rewind: set while a rewind is being processed (suppresses
     * underrun handling in check_left_to_play()). */
    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item; /* poll item for the PCM fds, installed by build_pollfd() */

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Latency smoothing and write accounting. */
    pa_smoother *smoother;
    uint64_t write_count;             /* total bytes written since last (un)suspend */
    uint64_t since_start;             /* bytes written since the stream (re)started */

    /* Cooperative device reservation (hand the device over to other
     * applications on request). */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
122
123 static void userdata_free(struct userdata *u);
124
125 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
126 pa_assert(r);
127 pa_assert(u);
128
129 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
130 return PA_HOOK_CANCEL;
131
132 return PA_HOOK_OK;
133 }
134
135 static void reserve_done(struct userdata *u) {
136 pa_assert(u);
137
138 if (u->reserve_slot) {
139 pa_hook_slot_free(u->reserve_slot);
140 u->reserve_slot = NULL;
141 }
142
143 if (u->reserve) {
144 pa_reserve_wrapper_unref(u->reserve);
145 u->reserve = NULL;
146 }
147 }
148
149 static void reserve_update(struct userdata *u) {
150 const char *description;
151 pa_assert(u);
152
153 if (!u->sink || !u->reserve)
154 return;
155
156 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
157 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
158 }
159
160 static int reserve_init(struct userdata *u, const char *dname) {
161 char *rname;
162
163 pa_assert(u);
164 pa_assert(dname);
165
166 if (u->reserve)
167 return 0;
168
169 if (pa_in_system_mode())
170 return 0;
171
172 if (!(rname = pa_alsa_get_reserve_name(dname)))
173 return 0;
174
175 /* We are resuming, try to lock the device */
176 u->reserve = pa_reserve_wrapper_get(u->core, rname);
177 pa_xfree(rname);
178
179 if (!(u->reserve))
180 return -1;
181
182 reserve_update(u);
183
184 pa_assert(!u->reserve_slot);
185 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
186
187 return 0;
188 }
189
190 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
191 pa_bool_t b;
192
193 pa_assert(w);
194 pa_assert(u);
195
196 b = PA_PTR_TO_UINT(busy) && !u->reserve;
197
198 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
199 return PA_HOOK_OK;
200 }
201
202 static void monitor_done(struct userdata *u) {
203 pa_assert(u);
204
205 if (u->monitor_slot) {
206 pa_hook_slot_free(u->monitor_slot);
207 u->monitor_slot = NULL;
208 }
209
210 if (u->monitor) {
211 pa_reserve_monitor_wrapper_unref(u->monitor);
212 u->monitor = NULL;
213 }
214 }
215
216 static int reserve_monitor_init(struct userdata *u, const char *dname) {
217 char *rname;
218
219 pa_assert(u);
220 pa_assert(dname);
221
222 if (pa_in_system_mode())
223 return 0;
224
225 if (!(rname = pa_alsa_get_reserve_name(dname)))
226 return 0;
227
228 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
229 pa_xfree(rname);
230
231 if (!(u->monitor))
232 return -1;
233
234 pa_assert(!u->monitor_slot);
235 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
236
237 return 0;
238 }
239
240 static void fix_min_sleep_wakeup(struct userdata *u) {
241 size_t max_use, max_use_2;
242
243 pa_assert(u);
244
245 max_use = u->hwbuf_size - u->hwbuf_unused;
246 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
247
248 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
249 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
250
251 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
252 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
253 }
254
255 static void fix_tsched_watermark(struct userdata *u) {
256 size_t max_use;
257 pa_assert(u);
258
259 max_use = u->hwbuf_size - u->hwbuf_unused;
260
261 if (u->tsched_watermark > max_use - u->min_sleep)
262 u->tsched_watermark = max_use - u->min_sleep;
263
264 if (u->tsched_watermark < u->min_wakeup)
265 u->tsched_watermark = u->min_wakeup;
266 }
267
/* Called after an underrun in timer-scheduling mode: first try to raise
 * the wakeup watermark; if it is already maxed out, raise the sink's
 * minimal latency instead. If neither is possible we have no further
 * remedy. */
static void adjust_after_underrun(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark: double it, but never
     * grow by more than one watermark step at a time. */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_notice("Increasing wakeup watermark to %0.2f ms",
                      (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_notice("Increasing minimal latency to %0.2f ms",
                      (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
        return;
    }

    /* If we get here both knobs are already at their limits and there
     * is nothing more we can do about the underruns. */
}
301
/* Split the configured buffer time into how long we may sleep
 * (*sleep_usec) and how long we reserve for refilling before the
 * watermark is hit (*process_usec). Based on the currently requested
 * sink latency, falling back to the full hardware buffer time. */
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    /* (pa_usec_t) -1 means nobody asked for a specific latency. */
    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    /* Never let the watermark eat more than half of the total budget. */
    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
330
331 static int try_recover(struct userdata *u, const char *call, int err) {
332 pa_assert(u);
333 pa_assert(call);
334 pa_assert(err < 0);
335
336 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
337
338 pa_assert(err != -EAGAIN);
339
340 if (err == -EPIPE)
341 pa_log_debug("%s: Buffer underrun!", call);
342
343 if (err == -ESTRPIPE)
344 pa_log_debug("%s: System suspended!", call);
345
346 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
347 pa_log("%s: %s", call, pa_alsa_strerror(err));
348 return -1;
349 }
350
351 u->first = TRUE;
352 u->since_start = 0;
353 return 0;
354 }
355
/* Given that @n_bytes of the hardware buffer are currently writable,
 * return how many bytes are still queued for playback. If the writable
 * amount exceeds the whole buffer we ran dry: log the underrun and, in
 * timer-scheduling mode, adjust the watermark/latency. */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
    size_t left_to_play;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size) {
        left_to_play = u->hwbuf_size - n_bytes;

#ifdef DEBUG_TIMING
        pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    } else {
        left_to_play = 0;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        /* Right after start or a rewind an apparent underrun is
         * expected, so only report it otherwise. */
        if (!u->first && !u->after_rewind) {

            if (pa_log_ratelimit())
                pa_log_info("Underrun!");

            if (u->use_tsched)
                adjust_after_underrun(u);
        }
    }

    return left_to_play;
}
390
391 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
392 pa_bool_t work_done = TRUE;
393 pa_usec_t max_sleep_usec = 0, process_usec = 0;
394 size_t left_to_play;
395 unsigned j = 0;
396
397 pa_assert(u);
398 pa_sink_assert_ref(u->sink);
399
400 if (u->use_tsched)
401 hw_sleep_time(u, &max_sleep_usec, &process_usec);
402
403 for (;;) {
404 snd_pcm_sframes_t n;
405 size_t n_bytes;
406 int r;
407 pa_bool_t after_avail = TRUE;
408
409 /* First we determine how many samples are missing to fill the
410 * buffer up to 100% */
411
412 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
413
414 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
415 continue;
416
417 return r;
418 }
419
420 n_bytes = (size_t) n * u->frame_size;
421
422 #ifdef DEBUG_TIMING
423 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
424 #endif
425
426 left_to_play = check_left_to_play(u, n_bytes);
427
428 if (u->use_tsched)
429
430 /* We won't fill up the playback buffer before at least
431 * half the sleep time is over because otherwise we might
432 * ask for more data from the clients then they expect. We
433 * need to guarantee that clients only have to keep around
434 * a single hw buffer length. */
435
436 if (!polled &&
437 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
438 #ifdef DEBUG_TIMING
439 pa_log_debug("Not filling up, because too early.");
440 #endif
441 break;
442 }
443
444 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
445
446 if (polled)
447 PA_ONCE_BEGIN {
448 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
449 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
450 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
451 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
452 pa_strnull(dn));
453 pa_xfree(dn);
454 } PA_ONCE_END;
455
456 #ifdef DEBUG_TIMING
457 pa_log_debug("Not filling up, because not necessary.");
458 #endif
459 break;
460 }
461
462
463 if (++j > 10) {
464 #ifdef DEBUG_TIMING
465 pa_log_debug("Not filling up, because already too many iterations.");
466 #endif
467
468 break;
469 }
470
471 n_bytes -= u->hwbuf_unused;
472 polled = FALSE;
473
474 #ifdef DEBUG_TIMING
475 pa_log_debug("Filling up");
476 #endif
477
478 for (;;) {
479 pa_memchunk chunk;
480 void *p;
481 int err;
482 const snd_pcm_channel_area_t *areas;
483 snd_pcm_uframes_t offset, frames;
484 snd_pcm_sframes_t sframes;
485
486 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
487 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
488
489 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
490
491 if (!after_avail && err == -EAGAIN)
492 break;
493
494 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
495 continue;
496
497 return r;
498 }
499
500 /* Make sure that if these memblocks need to be copied they will fit into one slot */
501 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
502 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
503
504 if (!after_avail && frames == 0)
505 break;
506
507 pa_assert(frames > 0);
508 after_avail = FALSE;
509
510 /* Check these are multiples of 8 bit */
511 pa_assert((areas[0].first & 7) == 0);
512 pa_assert((areas[0].step & 7)== 0);
513
514 /* We assume a single interleaved memory buffer */
515 pa_assert((areas[0].first >> 3) == 0);
516 pa_assert((areas[0].step >> 3) == u->frame_size);
517
518 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
519
520 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
521 chunk.length = pa_memblock_get_length(chunk.memblock);
522 chunk.index = 0;
523
524 pa_sink_render_into_full(u->sink, &chunk);
525 pa_memblock_unref_fixed(chunk.memblock);
526
527 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
528
529 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
530 continue;
531
532 return r;
533 }
534
535 work_done = TRUE;
536
537 u->write_count += frames * u->frame_size;
538 u->since_start += frames * u->frame_size;
539
540 #ifdef DEBUG_TIMING
541 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
542 #endif
543
544 if ((size_t) frames * u->frame_size >= n_bytes)
545 break;
546
547 n_bytes -= (size_t) frames * u->frame_size;
548 }
549 }
550
551 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
552
553 if (*sleep_usec > process_usec)
554 *sleep_usec -= process_usec;
555 else
556 *sleep_usec = 0;
557
558 return work_done ? 1 : 0;
559 }
560
/* Fill up the playback buffer using plain snd_pcm_writei() transfers
 * (the non-mmap fallback path).
 *
 * Returns 1 if any data was written, 0 if not, or a negative value when
 * recovery from an ALSA error failed. *sleep_usec is set to how long
 * the caller may sleep before the next fill-up is due; 'polled' tells
 * us whether we got here because poll() signalled POLLOUT. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;

        /* How much room is there in the hardware buffer right now? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* POLLOUT with nothing writable points at a driver bug;
             * complain about it exactly once. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Cap the number of fill-up iterations so we cannot spin here
         * forever. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;
            pa_bool_t after_avail = TRUE;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Render fresh data from the sink whenever our staging
             * chunk is used up. */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            /* Never write more than the hardware can take right now. */
            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Advance the staging chunk past what was just written. */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

    /* Reserve the processing time before the watermark is reached. */
    if (*sleep_usec > process_usec)
        *sleep_usec -= process_usec;
    else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
696
/* Feed the latency smoother with a fresh (system time, playback time)
 * sample pair derived from the PCM's currently reported delay. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    /* Prefer the driver-provided timestamp for the "now" value; a
     * failure here is not fatal, we fall back to the rtclock below. */
    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Bytes played so far = bytes written minus what still sits in the
     * hardware buffer (the reported delay, converted to bytes). */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);
}
737
738 static pa_usec_t sink_get_latency(struct userdata *u) {
739 pa_usec_t r;
740 int64_t delay;
741 pa_usec_t now1, now2;
742
743 pa_assert(u);
744
745 now1 = pa_rtclock_now();
746 now2 = pa_smoother_get(u->smoother, now1);
747
748 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
749
750 r = delay >= 0 ? (pa_usec_t) delay : 0;
751
752 if (u->memchunk.memblock)
753 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
754
755 return r;
756 }
757
758 static int build_pollfd(struct userdata *u) {
759 pa_assert(u);
760 pa_assert(u->pcm_handle);
761
762 if (u->alsa_rtpoll_item)
763 pa_rtpoll_item_free(u->alsa_rtpoll_item);
764
765 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
766 return -1;
767
768 return 0;
769 }
770
771 /* Called from IO context */
772 static int suspend(struct userdata *u) {
773 pa_assert(u);
774 pa_assert(u->pcm_handle);
775
776 pa_smoother_pause(u->smoother, pa_rtclock_now());
777
778 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
779 * take awfully long with our long buffer sizes today. */
780 snd_pcm_close(u->pcm_handle);
781 u->pcm_handle = NULL;
782
783 if (u->alsa_rtpoll_item) {
784 pa_rtpoll_item_free(u->alsa_rtpoll_item);
785 u->alsa_rtpoll_item = NULL;
786 }
787
788 pa_log_info("Device suspended...");
789
790 return 0;
791 }
792
/* Called from IO context */
/* Recompute hwbuf_unused and push updated software parameters
 * (avail_min) to ALSA, based on the currently requested latency.
 * Returns 0 on success or the negative error from
 * pa_alsa_set_sw_params(). */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if noone asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused. */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        /* The clamping limits depend on hwbuf_unused, so refresh them. */
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at last one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* In timer-scheduling mode let ALSA wake us only after the
         * sleep period, not for every free fragment. */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    /* Tell the sink how much data we may request at most in one go. */
    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);

    return 0;
}
848
/* Called from IO context */
/* Resume from suspend: reopen the PCM device and restore the exact
 * hardware configuration (sample spec, access mode, fragment layout)
 * we had before suspending. Returns 0 on success, -1 on failure (in
 * which case the device is left closed). */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Request exactly the configuration we ran with before suspend. */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    /* The driver may have negotiated something different; resuming with
     * changed parameters would break all running streams, so bail out. */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Restart write accounting and latency smoothing from scratch. */
    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);

    u->first = TRUE;
    u->since_start = 0;


    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
924
/* Called from IO context */
/* Message handler for the sink: answers latency queries and performs
 * the suspend/resume transitions on state changes, then delegates to
 * the generic pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* Report 0 latency while the device is suspended/closed. */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: set up polling. */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* Coming back from suspend: reopen the device. */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    /* Nothing to do for these states. */
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
980
981 /* Called from main context */
982 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
983 pa_sink_state_t old_state;
984 struct userdata *u;
985
986 pa_sink_assert_ref(s);
987 pa_assert_se(u = s->userdata);
988
989 old_state = pa_sink_get_state(u->sink);
990
991 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
992 reserve_done(u);
993 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
994 if (reserve_init(u, u->device_name) < 0)
995 return -1;
996
997 return 0;
998 }
999
1000 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1001 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1002
1003 pa_assert(u);
1004 pa_assert(u->mixer_handle);
1005
1006 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1007 return 0;
1008
1009 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1010 pa_sink_get_volume(u->sink, TRUE, FALSE);
1011 pa_sink_get_mute(u->sink, TRUE);
1012 }
1013
1014 return 0;
1015 }
1016
/* Read the current hardware volume from the mixer path and, if it
 * changed behind our back, adopt it as the sink's virtual volume
 * (resetting the software volume when the path supports dB). */
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    /* No change -- nothing to propagate. */
    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->virtual_volume = u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume reset;

        /* Hmm, so the hardware volume changed, let's reset our software volume */
        pa_cvolume_reset(&reset, s->sample_spec.channels);
        pa_sink_set_soft_volume(s, &reset);
    }
}
1047
/* Write the sink's virtual volume to the hardware mixer. With dB
 * support, the residual between what was requested and what the
 * hardware granted is made up in software; without it, the sink's
 * virtual volume is snapped to what the hardware actually applied. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->virtual_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    /* 'r' now holds the volume the hardware actually granted. */
    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&s->soft_volume, &s->virtual_volume, &u->hardware_volume);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->virtual_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->soft_volume));

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->virtual_volume = r;
    }
}
1086
1087 static void sink_get_mute_cb(pa_sink *s) {
1088 struct userdata *u = s->userdata;
1089 pa_bool_t b;
1090
1091 pa_assert(u);
1092 pa_assert(u->mixer_path);
1093 pa_assert(u->mixer_handle);
1094
1095 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1096 return;
1097
1098 s->muted = b;
1099 }
1100
1101 static void sink_set_mute_cb(pa_sink *s) {
1102 struct userdata *u = s->userdata;
1103
1104 pa_assert(u);
1105 pa_assert(u->mixer_path);
1106 pa_assert(u->mixer_handle);
1107
1108 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1109 }
1110
/* Switch the sink to a different device port: select the port's mixer
 * path, recompute base volume and volume-step count from the path's
 * capabilities, and re-apply current mute/volume. Always returns 0. */
static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    /* Every port carries its own mixer path; make it the active one. */
    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
        /* Place PA_VOLUME_NORM at the path's 0dB point. */
        s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
        s->n_volume_steps = PA_VOLUME_NORM+1;

        if (u->mixer_path->max_dB > 0.0)
            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
        else
            pa_log_info("No particular base volume set, fixing to 0 dB");
    } else {
        /* Without dB information, expose the raw mixer step range. */
        s->base_volume = PA_VOLUME_NORM;
        s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
    }

    if (data->setting)
        pa_alsa_setting_select(data->setting, u->mixer_handle);

    /* Re-apply current state through the (possibly new) mixer path. */
    if (s->set_mute)
        s->set_mute(s);
    if (s->set_volume)
        s->set_volume(s);

    return 0;
}
1147
1148 static void sink_update_requested_latency_cb(pa_sink *s) {
1149 struct userdata *u = s->userdata;
1150 size_t before;
1151 pa_assert(u);
1152
1153 if (!u->pcm_handle)
1154 return;
1155
1156 before = u->hwbuf_unused;
1157 update_sw_params(u);
1158
1159 /* Let's check whether we now use only a smaller part of the
1160 buffer then before. If so, we need to make sure that subsequent
1161 rewinds are relative to the new maximum fill level and not to the
1162 current fill level. Thus, let's do a full rewind once, to clear
1163 things up. */
1164
1165 if (u->hwbuf_unused > before) {
1166 pa_log_debug("Requesting rewind due to latency change.");
1167 pa_sink_request_rewind(s, (size_t) -1);
1168 }
1169 }
1170
1171 static int process_rewind(struct userdata *u) {
1172 snd_pcm_sframes_t unused;
1173 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1174 pa_assert(u);
1175
1176 /* Figure out how much we shall rewind and reset the counter */
1177 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1178
1179 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1180
1181 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1182 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1183 return -1;
1184 }
1185
1186 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1187
1188 if (u->hwbuf_size > unused_nbytes)
1189 limit_nbytes = u->hwbuf_size - unused_nbytes;
1190 else
1191 limit_nbytes = 0;
1192
1193 if (rewind_nbytes > limit_nbytes)
1194 rewind_nbytes = limit_nbytes;
1195
1196 if (rewind_nbytes > 0) {
1197 snd_pcm_sframes_t in_frames, out_frames;
1198
1199 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1200
1201 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1202 pa_log_debug("before: %lu", (unsigned long) in_frames);
1203 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1204 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1205 if (try_recover(u, "process_rewind", out_frames) < 0)
1206 return -1;
1207 out_frames = 0;
1208 }
1209
1210 pa_log_debug("after: %lu", (unsigned long) out_frames);
1211
1212 rewind_nbytes = (size_t) out_frames * u->frame_size;
1213
1214 if (rewind_nbytes <= 0)
1215 pa_log_info("Tried rewind, but was apparently not possible.");
1216 else {
1217 u->write_count -= rewind_nbytes;
1218 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1219 pa_sink_process_rewind(u->sink, rewind_nbytes);
1220
1221 u->after_rewind = TRUE;
1222 return 0;
1223 }
1224 } else
1225 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1226
1227 pa_sink_process_rewind(u->sink, 0);
1228 return 0;
1229 }
1230
/* Real-time IO thread for the sink. Runs the render/write loop:
 * optionally rewinds, writes audio to the PCM device, programs the
 * wakeup timer (in tsched mode), then sleeps in the rtpoll and feeds
 * poll revents back to ALSA. Exits cleanly on PA_MESSAGE_SHUTDOWN. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    /* Carries the poll revents of the previous iteration into the next
     * write call, so the writer knows whether POLLOUT actually fired */
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            /* Service any pending rewind request before writing */
            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First successful write: actually start the PCM device
                 * and resume the clock smoother */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* ret == 0 means the rtpoll was asked to quit */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLOUT signals an error/suspend
             * condition; try to recover and restart playback */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1363
1364 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1365 const char *n;
1366 char *t;
1367
1368 pa_assert(data);
1369 pa_assert(ma);
1370 pa_assert(device_name);
1371
1372 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1373 pa_sink_new_data_set_name(data, n);
1374 data->namereg_fail = TRUE;
1375 return;
1376 }
1377
1378 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1379 data->namereg_fail = TRUE;
1380 else {
1381 n = device_id ? device_id : device_name;
1382 data->namereg_fail = FALSE;
1383 }
1384
1385 if (mapping)
1386 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1387 else
1388 t = pa_sprintf_malloc("alsa_output.%s", n);
1389
1390 pa_sink_new_data_set_name(data, t);
1391 pa_xfree(t);
1392 }
1393
1394 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1395
1396 if (!mapping && !element)
1397 return;
1398
1399 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1400 pa_log_info("Failed to find a working mixer device.");
1401 return;
1402 }
1403
1404 if (element) {
1405
1406 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1407 goto fail;
1408
1409 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1410 goto fail;
1411
1412 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1413 pa_alsa_path_dump(u->mixer_path);
1414 } else {
1415
1416 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1417 goto fail;
1418
1419 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1420
1421 pa_log_debug("Probed mixer paths:");
1422 pa_alsa_path_set_dump(u->mixer_path_set);
1423 }
1424
1425 return;
1426
1427 fail:
1428
1429 if (u->mixer_path_set) {
1430 pa_alsa_path_set_free(u->mixer_path_set);
1431 u->mixer_path_set = NULL;
1432 } else if (u->mixer_path) {
1433 pa_alsa_path_free(u->mixer_path);
1434 u->mixer_path = NULL;
1435 }
1436
1437 if (u->mixer_handle) {
1438 snd_mixer_close(u->mixer_handle);
1439 u->mixer_handle = NULL;
1440 }
1441 }
1442
/* Wire the probed mixer (see find_mixer()) into the sink: select the
 * active path, install hardware volume/mute callbacks where the path
 * supports them, and start mixer event monitoring on the main loop.
 * Returns 0 on success (including "no mixer, software control only"),
 * -1 if fd monitoring could not be set up. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* No usable mixer was found earlier: pure software control */
    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Anchor PA_VOLUME_NORM at the path's maximum dB level;
             * same computation as in sink_set_port_cb() */
            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            /* No dB data: expose the raw integer control range */
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->sink->get_volume = sink_get_volume_cb;
        u->sink->set_volume = sink_set_volume_cb;

        u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    /* Watch the mixer fds on the core main loop so external volume
     * changes are picked up via mixer_callback */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}
1530
/* Create and start an ALSA sink. Parses module arguments, opens the
 * PCM device (by mapping, by device id, or by device string), probes
 * the mixer, creates the pa_sink object, configures buffer metrics
 * and latency, spawns the IO thread and publishes the sink.
 * Returns the new sink, or NULL on failure (everything allocated so
 * far is released via userdata_free()). */
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    /* b/d receive what the open helpers could actually enable for
     * mmap/tsched respectively */
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    /* Start from the core defaults, then let module arguments override */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    /* Remember what was asked for; ALSA may tweak ss during open */
    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    /* frag_size is unsigned, so "<= 0" only catches 0 — i.e. a
     * fragment time shorter than one frame */
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    /* NOTE(review): this local hwbuf_size appears unused afterwards;
     * u->hwbuf_size is recomputed below from the negotiated values */
    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Timer-based scheduling needs hrtimers to be useful */
    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            DEFAULT_TSCHED_BUFFER_USEC*2,
            DEFAULT_TSCHED_BUFFER_USEC*2,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    /* Acquire the device reservation before touching the hardware */
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

    /* Three ways to open the PCM device, in order of specificity:
     * explicit mapping, device id with auto-probed profile, or a raw
     * device string */
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, mapping)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))

            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    /* Downgrade gracefully if the device couldn't give us what we asked for */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
        pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    /* Build the sink description and properties */
    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    /* Final buffer metrics, from the values the device actually accepted */
    u->frame_size = frame_size;
    u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    /* Scale the watermark from the requested spec to the negotiated one */
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    pa_sink_set_max_rewind(u->sink, u->hwbuf_size);

    if (u->use_tsched) {
        u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    /* userdata_free() releases everything acquired so far */
    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
1836
/* Release all resources held by the userdata, in dependency order:
 * unlink the sink first (stops new traffic), then shut down the IO
 * thread, then the sink object, buffers, poll infrastructure, PCM and
 * mixer handles, and finally the reservation and name strings.
 * Safe to call on a partially constructed userdata (all fields are
 * NULL-checked; the struct is allocated with pa_xnew0). */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Synchronously stop the IO thread before tearing anything else down */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* Drop any queued audio before closing the device */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* When a path set exists it owns the individual paths, so only
     * free u->mixer_path separately if there is no set */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
1888
1889 void pa_alsa_sink_free(pa_sink *s) {
1890 struct userdata *u;
1891
1892 pa_sink_assert_ref(s);
1893 pa_assert_se(u = s->userdata);
1894
1895 userdata_free(u);
1896 }