]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
Merge commit 'origin/master-tx'
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
38 #include <pulse/i18n.h>
39
40 #include <pulsecore/core.h>
41 #include <pulsecore/module.h>
42 #include <pulsecore/memchunk.h>
43 #include <pulsecore/sink.h>
44 #include <pulsecore/modargs.h>
45 #include <pulsecore/core-util.h>
46 #include <pulsecore/sample-util.h>
47 #include <pulsecore/log.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/thread.h>
50 #include <pulsecore/core-error.h>
51 #include <pulsecore/thread-mq.h>
52 #include <pulsecore/rtpoll.h>
53 #include <pulsecore/rtclock.h>
54 #include <pulsecore/time-smoother.h>
55
56 #include <modules/reserve-wrap.h>
57
58 #include "alsa-util.h"
59 #include "alsa-sink.h"
60
61 /* #define DEBUG_TIMING */
62
63 #define DEFAULT_DEVICE "default"
64 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
65 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
66 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
67 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
68 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
69
/* Per-instance state of the ALSA playback sink. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* IO thread machinery */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;          /* NULL while the sink is suspended */

    /* Hardware mixer (volume/mute) state */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;     /* last volume read from/written to the mixer */

    size_t
        frame_size,                 /* bytes per audio frame */
        fragment_size,              /* bytes per ALSA period */
        hwbuf_size,                 /* total hardware buffer size in bytes */
        tsched_watermark,           /* refill when only this much is left (tsched mode) */
        hwbuf_unused,               /* tail of the hw buffer we deliberately keep empty */
        min_sleep,                  /* lower bound for sleep length, in bytes */
        min_wakeup,                 /* wake up at least this early, in bytes */
        watermark_step;             /* how much to grow the watermark on underrun */

    unsigned nfragments;
    pa_memchunk memchunk;           /* partially-written chunk for the writei() path */

    char *device_name; /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1;

    /* first: no data written since open/recover; after_rewind: just rewound */
    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Latency interpolation */
    pa_smoother *smoother;
    uint64_t write_count;           /* total bytes handed to ALSA */
    uint64_t since_start;           /* bytes written since last start/recover */

    /* Device reservation */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
121
122 static void userdata_free(struct userdata *u);
123
124 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
125 pa_assert(r);
126 pa_assert(u);
127
128 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
129 return PA_HOOK_CANCEL;
130
131 return PA_HOOK_OK;
132 }
133
134 static void reserve_done(struct userdata *u) {
135 pa_assert(u);
136
137 if (u->reserve_slot) {
138 pa_hook_slot_free(u->reserve_slot);
139 u->reserve_slot = NULL;
140 }
141
142 if (u->reserve) {
143 pa_reserve_wrapper_unref(u->reserve);
144 u->reserve = NULL;
145 }
146 }
147
148 static void reserve_update(struct userdata *u) {
149 const char *description;
150 pa_assert(u);
151
152 if (!u->sink || !u->reserve)
153 return;
154
155 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
156 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
157 }
158
159 static int reserve_init(struct userdata *u, const char *dname) {
160 char *rname;
161
162 pa_assert(u);
163 pa_assert(dname);
164
165 if (u->reserve)
166 return 0;
167
168 if (pa_in_system_mode())
169 return 0;
170
171 /* We are resuming, try to lock the device */
172 if (!(rname = pa_alsa_get_reserve_name(dname)))
173 return 0;
174
175 u->reserve = pa_reserve_wrapper_get(u->core, rname);
176 pa_xfree(rname);
177
178 if (!(u->reserve))
179 return -1;
180
181 reserve_update(u);
182
183 pa_assert(!u->reserve_slot);
184 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
185
186 return 0;
187 }
188
189 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
190 pa_bool_t b;
191
192 pa_assert(w);
193 pa_assert(u);
194
195 b = PA_PTR_TO_UINT(busy) && !u->reserve;
196
197 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
198 return PA_HOOK_OK;
199 }
200
201 static void monitor_done(struct userdata *u) {
202 pa_assert(u);
203
204 if (u->monitor_slot) {
205 pa_hook_slot_free(u->monitor_slot);
206 u->monitor_slot = NULL;
207 }
208
209 if (u->monitor) {
210 pa_reserve_monitor_wrapper_unref(u->monitor);
211 u->monitor = NULL;
212 }
213 }
214
215 static int reserve_monitor_init(struct userdata *u, const char *dname) {
216 char *rname;
217
218 pa_assert(u);
219 pa_assert(dname);
220
221 if (pa_in_system_mode())
222 return 0;
223
224 /* We are resuming, try to lock the device */
225 if (!(rname = pa_alsa_get_reserve_name(dname)))
226 return 0;
227
228 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
229 pa_xfree(rname);
230
231 if (!(u->monitor))
232 return -1;
233
234 pa_assert(!u->monitor_slot);
235 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
236
237 return 0;
238 }
239
240 static void fix_min_sleep_wakeup(struct userdata *u) {
241 size_t max_use, max_use_2;
242
243 pa_assert(u);
244
245 max_use = u->hwbuf_size - u->hwbuf_unused;
246 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
247
248 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
249 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
250
251 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
252 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
253 }
254
255 static void fix_tsched_watermark(struct userdata *u) {
256 size_t max_use;
257 pa_assert(u);
258
259 max_use = u->hwbuf_size - u->hwbuf_unused;
260
261 if (u->tsched_watermark > max_use - u->min_sleep)
262 u->tsched_watermark = max_use - u->min_sleep;
263
264 if (u->tsched_watermark < u->min_wakeup)
265 u->tsched_watermark = u->min_wakeup;
266 }
267
268 static void adjust_after_underrun(struct userdata *u) {
269 size_t old_watermark;
270 pa_usec_t old_min_latency, new_min_latency;
271
272 pa_assert(u);
273 pa_assert(u->use_tsched);
274
275 /* First, just try to increase the watermark */
276 old_watermark = u->tsched_watermark;
277 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
278 fix_tsched_watermark(u);
279
280 if (old_watermark != u->tsched_watermark) {
281 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
282 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
283 return;
284 }
285
286 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
287 old_min_latency = u->sink->thread_info.min_latency;
288 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
289 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
290
291 if (old_min_latency != new_min_latency) {
292 pa_log_notice("Increasing minimal latency to %0.2f ms",
293 (double) new_min_latency / PA_USEC_PER_MSEC);
294
295 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
296 return;
297 }
298
299 /* When we reach this we're officialy fucked! */
300 }
301
302 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
303 pa_usec_t usec, wm;
304
305 pa_assert(sleep_usec);
306 pa_assert(process_usec);
307
308 pa_assert(u);
309
310 usec = pa_sink_get_requested_latency_within_thread(u->sink);
311
312 if (usec == (pa_usec_t) -1)
313 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
314
315 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
316
317 if (wm > usec)
318 wm = usec/2;
319
320 *sleep_usec = usec - wm;
321 *process_usec = wm;
322
323 #ifdef DEBUG_TIMING
324 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
325 (unsigned long) (usec / PA_USEC_PER_MSEC),
326 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
327 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
328 #endif
329 }
330
331 static int try_recover(struct userdata *u, const char *call, int err) {
332 pa_assert(u);
333 pa_assert(call);
334 pa_assert(err < 0);
335
336 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
337
338 pa_assert(err != -EAGAIN);
339
340 if (err == -EPIPE)
341 pa_log_debug("%s: Buffer underrun!", call);
342
343 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
344 pa_log("%s: %s", call, pa_alsa_strerror(err));
345 return -1;
346 }
347
348 u->first = TRUE;
349 u->since_start = 0;
350 return 0;
351 }
352
/* Given how many bytes of the hardware buffer are writable (n_bytes),
 * return how many bytes are still queued for playback, detecting and
 * reacting to underruns along the way. */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
    size_t left_to_play;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size) {
        left_to_play = u->hwbuf_size - n_bytes;

#ifdef DEBUG_TIMING
        pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    } else {
        /* More writable space than the buffer holds: we underran */
        left_to_play = 0;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        /* Right after a start or a rewind an underrun is expected, so
         * don't log it or grow the watermark then */
        if (!u->first && !u->after_rewind) {

            if (pa_log_ratelimit())
                pa_log_info("Underrun!");

            if (u->use_tsched)
                adjust_after_underrun(u);
        }
    }

    return left_to_play;
}
387
388 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
389 pa_bool_t work_done = TRUE;
390 pa_usec_t max_sleep_usec = 0, process_usec = 0;
391 size_t left_to_play;
392 unsigned j = 0;
393
394 pa_assert(u);
395 pa_sink_assert_ref(u->sink);
396
397 if (u->use_tsched)
398 hw_sleep_time(u, &max_sleep_usec, &process_usec);
399
400 for (;;) {
401 snd_pcm_sframes_t n;
402 size_t n_bytes;
403 int r;
404
405 /* First we determine how many samples are missing to fill the
406 * buffer up to 100% */
407
408 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
409
410 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
411 continue;
412
413 return r;
414 }
415
416 n_bytes = (size_t) n * u->frame_size;
417
418 #ifdef DEBUG_TIMING
419 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
420 #endif
421
422 left_to_play = check_left_to_play(u, n_bytes);
423
424 if (u->use_tsched)
425
426 /* We won't fill up the playback buffer before at least
427 * half the sleep time is over because otherwise we might
428 * ask for more data from the clients then they expect. We
429 * need to guarantee that clients only have to keep around
430 * a single hw buffer length. */
431
432 if (!polled &&
433 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
434 #ifdef DEBUG_TIMING
435 pa_log_debug("Not filling up, because too early.");
436 #endif
437 break;
438 }
439
440 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
441
442 if (polled)
443 PA_ONCE_BEGIN {
444 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
445 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
446 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
447 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
448 pa_strnull(dn));
449 pa_xfree(dn);
450 } PA_ONCE_END;
451
452 #ifdef DEBUG_TIMING
453 pa_log_debug("Not filling up, because not necessary.");
454 #endif
455 break;
456 }
457
458
459 if (++j > 10) {
460 #ifdef DEBUG_TIMING
461 pa_log_debug("Not filling up, because already too many iterations.");
462 #endif
463
464 break;
465 }
466
467 n_bytes -= u->hwbuf_unused;
468 polled = FALSE;
469
470 #ifdef DEBUG_TIMING
471 pa_log_debug("Filling up");
472 #endif
473
474 for (;;) {
475 pa_memchunk chunk;
476 void *p;
477 int err;
478 const snd_pcm_channel_area_t *areas;
479 snd_pcm_uframes_t offset, frames;
480 snd_pcm_sframes_t sframes;
481
482 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
483 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
484
485 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
486
487 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
488 continue;
489
490 return r;
491 }
492
493 /* Make sure that if these memblocks need to be copied they will fit into one slot */
494 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
495 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
496
497 /* Check these are multiples of 8 bit */
498 pa_assert((areas[0].first & 7) == 0);
499 pa_assert((areas[0].step & 7)== 0);
500
501 /* We assume a single interleaved memory buffer */
502 pa_assert((areas[0].first >> 3) == 0);
503 pa_assert((areas[0].step >> 3) == u->frame_size);
504
505 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
506
507 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
508 chunk.length = pa_memblock_get_length(chunk.memblock);
509 chunk.index = 0;
510
511 pa_sink_render_into_full(u->sink, &chunk);
512 pa_memblock_unref_fixed(chunk.memblock);
513
514 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
515
516 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
517 continue;
518
519 return r;
520 }
521
522 work_done = TRUE;
523
524 u->write_count += frames * u->frame_size;
525 u->since_start += frames * u->frame_size;
526
527 #ifdef DEBUG_TIMING
528 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
529 #endif
530
531 if ((size_t) frames * u->frame_size >= n_bytes)
532 break;
533
534 n_bytes -= (size_t) frames * u->frame_size;
535 }
536 }
537
538 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
539
540 if (*sleep_usec > process_usec)
541 *sleep_usec -= process_usec;
542 else
543 *sleep_usec = 0;
544
545 return work_done ? 1 : 0;
546 }
547
/* snd_pcm_writei()-based write path for setups where mmap is not used.
 * Returns 1 if some audio was written, 0 if none, negative on
 * unrecoverable error; *sleep_usec is filled in as in mmap_write(). */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;

        /* How much room is there in the hardware buffer? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* A poll() wakeup with nothing writable indicates a driver
             * bug -- complain once */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Cap the number of refill rounds per invocation */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Refill our staging chunk from the sink when it ran dry */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            pa_assert(frames != 0);

            if (PA_UNLIKELY(frames < 0)) {

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            /* Consume what was actually written from the staging chunk */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

    if (*sleep_usec > process_usec)
        *sleep_usec -= process_usec;
    else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
675
/* Feed the current playback position into the time smoother so that
 * sink_get_latency() can interpolate between updates. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    /* Prefer the driver-provided timestamp over our own clock */
    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Bytes actually played = bytes written minus what is still queued */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_usec();

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);
}
716
717 static pa_usec_t sink_get_latency(struct userdata *u) {
718 pa_usec_t r;
719 int64_t delay;
720 pa_usec_t now1, now2;
721
722 pa_assert(u);
723
724 now1 = pa_rtclock_usec();
725 now2 = pa_smoother_get(u->smoother, now1);
726
727 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
728
729 r = delay >= 0 ? (pa_usec_t) delay : 0;
730
731 if (u->memchunk.memblock)
732 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
733
734 return r;
735 }
736
737 static int build_pollfd(struct userdata *u) {
738 pa_assert(u);
739 pa_assert(u->pcm_handle);
740
741 if (u->alsa_rtpoll_item)
742 pa_rtpoll_item_free(u->alsa_rtpoll_item);
743
744 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
745 return -1;
746
747 return 0;
748 }
749
750 /* Called from IO context */
/* Close the PCM device and stop polling it; the smoother is paused so
 * the reported latency stays frozen while suspended. Returns 0. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_usec());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}
771
772 /* Called from IO context */
/* Recompute hwbuf_unused from the requested sink latency and program
 * ALSA's avail_min wakeup threshold accordingly. Returns 0 on success
 * or the negative error from pa_alsa_set_sw_params(). */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if noone asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency is kept unused */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* In tsched mode, push the wakeup point out by the planned sleep time */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);

    return 0;
}
827
828 /* Called from IO context */
/* Reopen the PCM device after a suspend and restore the exact hardware
 * configuration we had before; fails if the device can no longer
 * provide it. Returns 0 on success, -1 on failure (handle closed). */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    snd_config_update_free_global();
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Ask for exactly the parameters we had before suspending */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Start from a clean slate, as after a fresh open */
    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
900
901 /* Called from IO context */
/* Called from IO context: handle latency queries and state changes
 * (suspend/resume/start polling), then fall through to the generic
 * pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* Latency is only meaningful while the device is open */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: start polling the device */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* Coming back from suspend: reopen the device */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    /* Nothing to do for these states */
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
956
957 /* Called from main context */
958 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
959 pa_sink_state_t old_state;
960 struct userdata *u;
961
962 pa_sink_assert_ref(s);
963 pa_assert_se(u = s->userdata);
964
965 old_state = pa_sink_get_state(u->sink);
966
967 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
968 reserve_done(u);
969 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
970 if (reserve_init(u, u->device_name) < 0)
971 return -1;
972
973 return 0;
974 }
975
976 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
977 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
978
979 pa_assert(u);
980 pa_assert(u->mixer_handle);
981
982 if (mask == SND_CTL_EVENT_MASK_REMOVE)
983 return 0;
984
985 if (mask & SND_CTL_EVENT_MASK_VALUE) {
986 pa_sink_get_volume(u->sink, TRUE, FALSE);
987 pa_sink_get_mute(u->sink, TRUE);
988 }
989
990 return 0;
991 }
992
/* Read the hardware volume from the mixer and propagate it to the
 * sink; resets the software volume when the path supports dB. */
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    /* Nothing changed -- avoid pointless updates */
    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->virtual_volume = u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume reset;

        /* Hmm, so the hardware volume changed, let's reset our software volume */
        pa_cvolume_reset(&reset, s->sample_spec.channels);
        pa_sink_set_soft_volume(s, &reset);
    }
}
1023
/* Write the sink's virtual volume to the hardware mixer. If the path
 * supports dB, the remainder that the hardware couldn't apply exactly
 * is made up with software volume; otherwise the virtual volume is
 * snapped to what the hardware actually accepted. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->virtual_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&s->soft_volume, &s->virtual_volume, &u->hardware_volume);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->virtual_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->soft_volume));

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->virtual_volume = r;
    }
}
1062
1063 static void sink_get_mute_cb(pa_sink *s) {
1064 struct userdata *u = s->userdata;
1065 pa_bool_t b;
1066
1067 pa_assert(u);
1068 pa_assert(u->mixer_path);
1069 pa_assert(u->mixer_handle);
1070
1071 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1072 return;
1073
1074 s->muted = b;
1075 }
1076
/* Push the sink's mute state down to the hardware mixer. */
static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}
1086
/* Switch the sink to a new port: select the port's mixer path and
 * setting, recompute the base volume / volume step count, and re-apply
 * the current mute and volume on the new path. Returns 0. */
static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
        /* With a dB-capable path, place PA_VOLUME_NORM at the path's 0dB
         * point by adjusting the base volume */
        s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
        s->n_volume_steps = PA_VOLUME_NORM+1;

        if (u->mixer_path->max_dB > 0.0)
            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
        else
            pa_log_info("No particular base volume set, fixing to 0 dB");
    } else {
        /* Without dB information, expose the raw hardware step range */
        s->base_volume = PA_VOLUME_NORM;
        s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
    }

    if (data->setting)
        pa_alsa_setting_select(data->setting, u->mixer_handle);

    /* Re-apply current state on the newly selected path */
    if (s->set_mute)
        s->set_mute(s);
    if (s->set_volume)
        s->set_volume(s);

    return 0;
}
1123
/* Requested-latency hook: re-derive the software parameters and, if the
 * usable buffer shrank, force a full rewind so fill levels stay
 * consistent with the new maximum. */
static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    size_t before;
    pa_assert(u);

    /* Nothing to reconfigure while suspended */
    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
    buffer than before. If so, we need to make sure that subsequent
    rewinds are relative to the new maximum fill level and not to the
    current fill level. Thus, let's do a full rewind once, to clear
    things up. */

    if (u->hwbuf_unused > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}
1146
1147 static int process_rewind(struct userdata *u) {
1148 snd_pcm_sframes_t unused;
1149 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1150 pa_assert(u);
1151
1152 /* Figure out how much we shall rewind and reset the counter */
1153 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1154
1155 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1156
1157 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1158 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1159 return -1;
1160 }
1161
1162 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1163
1164 if (u->hwbuf_size > unused_nbytes)
1165 limit_nbytes = u->hwbuf_size - unused_nbytes;
1166 else
1167 limit_nbytes = 0;
1168
1169 if (rewind_nbytes > limit_nbytes)
1170 rewind_nbytes = limit_nbytes;
1171
1172 if (rewind_nbytes > 0) {
1173 snd_pcm_sframes_t in_frames, out_frames;
1174
1175 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1176
1177 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1178 pa_log_debug("before: %lu", (unsigned long) in_frames);
1179 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1180 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1181 return -1;
1182 }
1183 pa_log_debug("after: %lu", (unsigned long) out_frames);
1184
1185 rewind_nbytes = (size_t) out_frames * u->frame_size;
1186
1187 if (rewind_nbytes <= 0)
1188 pa_log_info("Tried rewind, but was apparently not possible.");
1189 else {
1190 u->write_count -= out_frames * u->frame_size;
1191 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1192 pa_sink_process_rewind(u->sink, rewind_nbytes);
1193
1194 u->after_rewind = TRUE;
1195 return 0;
1196 }
1197 } else
1198 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1199
1200 pa_sink_process_rewind(u->sink, 0);
1201 return 0;
1202 }
1203
/* Real-time I/O thread: renders audio into the ALSA buffer, schedules
 * wakeups (timer-based when tsched is on, otherwise driven by POLLOUT),
 * and recovers from ALSA error conditions. Runs until it receives
 * PA_MESSAGE_SHUTDOWN through the thread message queue. Note that
 * `revents` carries over from the bottom of one loop iteration into the
 * write calls at the top of the next. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            /* Handle a pending rewind before writing new data */
            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First successful write: kick off playback and resume
                 * the clock smoother */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec(), TRUE);
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* pa_rtpoll_run() returning 0 means we were asked to quit */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLOUT signals an error condition;
             * recover and restart playback from scratch */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1337
1338 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name) {
1339 const char *n;
1340 char *t;
1341
1342 pa_assert(data);
1343 pa_assert(ma);
1344 pa_assert(device_name);
1345
1346 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1347 pa_sink_new_data_set_name(data, n);
1348 data->namereg_fail = TRUE;
1349 return;
1350 }
1351
1352 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1353 data->namereg_fail = TRUE;
1354 else {
1355 n = device_id ? device_id : device_name;
1356 data->namereg_fail = FALSE;
1357 }
1358
1359 t = pa_sprintf_malloc("alsa_output.%s", n);
1360 pa_sink_new_data_set_name(data, t);
1361 pa_xfree(t);
1362 }
1363
1364 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1365
1366 if (!mapping && !element)
1367 return;
1368
1369 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1370 pa_log_info("Failed to find a working mixer device.");
1371 return;
1372 }
1373
1374 if (element) {
1375
1376 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1377 goto fail;
1378
1379 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1380 goto fail;
1381
1382 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1383 pa_alsa_path_dump(u->mixer_path);
1384 } else {
1385
1386 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1387 goto fail;
1388
1389 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1390
1391 pa_log_debug("Probed mixer paths:");
1392 pa_alsa_path_set_dump(u->mixer_path_set);
1393 }
1394
1395 return;
1396
1397 fail:
1398
1399 if (u->mixer_path_set) {
1400 pa_alsa_path_set_free(u->mixer_path_set);
1401 u->mixer_path_set = NULL;
1402 } else if (u->mixer_path) {
1403 pa_alsa_path_free(u->mixer_path);
1404 u->mixer_path = NULL;
1405 }
1406
1407 if (u->mixer_handle) {
1408 snd_mixer_close(u->mixer_handle);
1409 u->mixer_handle = NULL;
1410 }
1411 }
1412
/* Hook the probed mixer path (or path set) up to the sink: select the
 * active path, publish hardware volume/mute callbacks when the path
 * supports them, and start watching the mixer fds so external changes
 * (e.g. from alsamixer) are picked up. Returns 0 on success — also when
 * there simply is no mixer or no usable path — and -1 on error. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        /* No active port: fall back to the first path of the set */
        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* PA_VOLUME_NORM maps to 0 dB; the base volume encodes the
             * path's maximum gain/attenuation relative to that */
            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            /* No dB information: expose the raw integer volume range */
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->sink->get_volume = sink_get_volume_cb;
        u->sink->set_volume = sink_set_volume_cb;

        u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    /* Watch the mixer fds from the main loop so external changes reach
     * mixer_callback */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}
1500
/* Create a new ALSA playback sink.
 *
 * m       - owning module
 * ma      - parsed module arguments (device, buffer metrics, mmap=,
 *           tsched=, ignore_dB=, sink_name=, sink_properties=, ...)
 * driver  - driver name recorded on the sink
 * card    - card the sink belongs to, or NULL
 * mapping - ALSA profile mapping to open, or NULL to open by
 *           device_id=/device= instead
 *
 * Returns the new sink, or NULL on failure (partial state is cleaned
 * up via userdata_free()). */
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    /* Sample spec and channel map: core defaults, overridable via
     * module arguments */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    /* Remember the requested spec; ALSA may change ss when opening */
    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    /* Buffer metrics: defaults first, then module-argument overrides */
    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Timer-based scheduling needs high-resolution timers */
    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    /* Smoother translating between the sound card clock and the system
     * clock */
    u->smoother = pa_smoother_new(
            DEFAULT_TSCHED_BUFFER_USEC*2,
            DEFAULT_TSCHED_BUFFER_USEC*2,
            TRUE,
            TRUE,
            5,
            pa_rtclock_usec(),
            TRUE);

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    /* Register with the device reservation mechanism */
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    /* b/d are in-out flags: the open helpers clear them if the device
     * cannot do mmap resp. timer-based scheduling */
    b = use_mmap;
    d = use_tsched;

    /* Open the PCM: by explicit mapping, by device_id (auto-probing a
     * profile set), or by plain device string */
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, mapping)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))

            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    /* Fall back if the device could not honor our mmap/tsched wishes */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
        pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    /* Fill in the sink creation data: identity, property list, ports */
    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name);
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    /* Convert the watermark from the requested sample spec to the one
     * ALSA actually gave us */
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    pa_sink_set_max_rewind(u->sink, u->hwbuf_size);

    if (u->use_tsched) {
        u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings. NOTE(review): `data` is read here
     * after pa_sink_new_data_done() was called above; presumably
     * _done() frees only internal members while the plain flag fields
     * stay valid — confirm against pa_sink_new_data_done(). */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
1806
/* Tear the userdata down in roughly the reverse order of construction.
 * The sink is unlinked first so no new data arrives, then the I/O
 * thread is shut down synchronously before anything it might still
 * touch is freed. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Stop the real-time thread before releasing its resources */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        /* Drop queued samples instead of draining; we are going away */
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* When a path set exists, mixer_path points into it (see
     * setup_mixer), so only the set itself is freed */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
1858
1859 void pa_alsa_sink_free(pa_sink *s) {
1860 struct userdata *u;
1861
1862 pa_sink_assert_ref(s);
1863 pa_assert_se(u = s->userdata);
1864
1865 userdata_free(u);
1866 }