]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
core: get rid of rt sig/timer handling since modern Linux' ppoll() is finally fixed...
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
38 #include <pulse/i18n.h>
39
40 #include <pulsecore/core.h>
41 #include <pulsecore/module.h>
42 #include <pulsecore/memchunk.h>
43 #include <pulsecore/sink.h>
44 #include <pulsecore/modargs.h>
45 #include <pulsecore/core-util.h>
46 #include <pulsecore/sample-util.h>
47 #include <pulsecore/log.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/thread.h>
50 #include <pulsecore/core-error.h>
51 #include <pulsecore/thread-mq.h>
52 #include <pulsecore/rtpoll.h>
53 #include <pulsecore/rtclock.h>
54 #include <pulsecore/time-smoother.h>
55
56 #include <modules/reserve-wrap.h>
57
58 #include "alsa-util.h"
59 #include "alsa-sink.h"
60
61 /* #define DEBUG_TIMING */
62
63 #define DEFAULT_DEVICE "default"
64 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
65 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
66 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
67 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
68 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
69
/* Per-sink state for the ALSA playback backend. One instance is
 * allocated per loaded module/sink and hangs off pa_sink->userdata. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* Dedicated IO thread plus the message queue and rtpoll loop it runs. */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;           /* NULL while suspended */

    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;      /* last volume read from/written to hw */

    /* All of these are byte counts derived from the current hw setup. */
    size_t
        frame_size,                  /* bytes per frame */
        fragment_size,               /* bytes per ALSA period */
        hwbuf_size,                  /* total hw buffer size */
        tsched_watermark,            /* refill when this much is left (tsched) */
        hwbuf_unused,                /* tail of buffer we deliberately keep empty */
        min_sleep,                   /* lower bound on sleep per iteration */
        min_wakeup,                  /* wake at least this far before empty */
        watermark_step;              /* watermark increment on underrun */

    unsigned nfragments;             /* number of ALSA periods */
    pa_memchunk memchunk;            /* partially written chunk (unix_write only) */

    char *device_name;               /* name of the PCM device */
    char *control_device;            /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1;

    /* first: no data written since start/recovery; after_rewind: a rewind
     * just happened -- both suppress underrun handling for one round. */
    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Clock smoother plus raw byte counters feeding it. */
    pa_smoother *smoother;
    uint64_t write_count;            /* total bytes written to the device */
    uint64_t since_start;            /* bytes written since start/recovery */

    /* Device reservation (session-wide exclusive access) hooks. */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
121
122 static void userdata_free(struct userdata *u);
123
124 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
125 pa_assert(r);
126 pa_assert(u);
127
128 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
129 return PA_HOOK_CANCEL;
130
131 return PA_HOOK_OK;
132 }
133
134 static void reserve_done(struct userdata *u) {
135 pa_assert(u);
136
137 if (u->reserve_slot) {
138 pa_hook_slot_free(u->reserve_slot);
139 u->reserve_slot = NULL;
140 }
141
142 if (u->reserve) {
143 pa_reserve_wrapper_unref(u->reserve);
144 u->reserve = NULL;
145 }
146 }
147
148 static void reserve_update(struct userdata *u) {
149 const char *description;
150 pa_assert(u);
151
152 if (!u->sink || !u->reserve)
153 return;
154
155 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
156 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
157 }
158
159 static int reserve_init(struct userdata *u, const char *dname) {
160 char *rname;
161
162 pa_assert(u);
163 pa_assert(dname);
164
165 if (u->reserve)
166 return 0;
167
168 if (pa_in_system_mode())
169 return 0;
170
171 /* We are resuming, try to lock the device */
172 if (!(rname = pa_alsa_get_reserve_name(dname)))
173 return 0;
174
175 u->reserve = pa_reserve_wrapper_get(u->core, rname);
176 pa_xfree(rname);
177
178 if (!(u->reserve))
179 return -1;
180
181 reserve_update(u);
182
183 pa_assert(!u->reserve_slot);
184 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
185
186 return 0;
187 }
188
189 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
190 pa_bool_t b;
191
192 pa_assert(w);
193 pa_assert(u);
194
195 b = PA_PTR_TO_UINT(busy) && !u->reserve;
196
197 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
198 return PA_HOOK_OK;
199 }
200
201 static void monitor_done(struct userdata *u) {
202 pa_assert(u);
203
204 if (u->monitor_slot) {
205 pa_hook_slot_free(u->monitor_slot);
206 u->monitor_slot = NULL;
207 }
208
209 if (u->monitor) {
210 pa_reserve_monitor_wrapper_unref(u->monitor);
211 u->monitor = NULL;
212 }
213 }
214
215 static int reserve_monitor_init(struct userdata *u, const char *dname) {
216 char *rname;
217
218 pa_assert(u);
219 pa_assert(dname);
220
221 if (pa_in_system_mode())
222 return 0;
223
224 /* We are resuming, try to lock the device */
225 if (!(rname = pa_alsa_get_reserve_name(dname)))
226 return 0;
227
228 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
229 pa_xfree(rname);
230
231 if (!(u->monitor))
232 return -1;
233
234 pa_assert(!u->monitor_slot);
235 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
236
237 return 0;
238 }
239
/* Recompute the minimal sleep and wakeup margins (in bytes) from the
 * TSCHED_MIN_{SLEEP,WAKEUP}_USEC constants, clamping each into
 * [one frame, half of the usable buffer] so they always fit the current
 * hw configuration. */
static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);

    /* Usable part of the hw buffer, i.e. everything not kept unused. */
    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}
254
255 static void fix_tsched_watermark(struct userdata *u) {
256 size_t max_use;
257 pa_assert(u);
258
259 max_use = u->hwbuf_size - u->hwbuf_unused;
260
261 if (u->tsched_watermark > max_use - u->min_sleep)
262 u->tsched_watermark = max_use - u->min_sleep;
263
264 if (u->tsched_watermark < u->min_wakeup)
265 u->tsched_watermark = u->min_wakeup;
266 }
267
/* Called after an underrun was detected: make future underruns less
 * likely, first by raising the wakeup watermark and, once that is maxed
 * out, by raising the sink's minimal latency. */
static void adjust_after_underrun(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    /* Grow by at most one step, and never more than double. */
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_notice("Increasing wakeup watermark to %0.2f ms",
                      (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_notice("Increasing minimal latency to %0.2f ms",
                      (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
        return;
    }

    /* Both knobs are already at their maximum -- there is nothing more
     * we can do to avoid further underruns. */
}
301
/* Split the current latency budget into how long the IO thread may
 * sleep (*sleep_usec) and how much headroom must remain for processing
 * before the buffer runs dry (*process_usec, derived from the
 * watermark). */
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    /* No specific latency requested: budget is the whole hw buffer. */
    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    /* Never let the watermark consume more than half the budget. */
    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
330
331 static int try_recover(struct userdata *u, const char *call, int err) {
332 pa_assert(u);
333 pa_assert(call);
334 pa_assert(err < 0);
335
336 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
337
338 pa_assert(err != -EAGAIN);
339
340 if (err == -EPIPE)
341 pa_log_debug("%s: Buffer underrun!", call);
342
343 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
344 pa_log("%s: %s", call, pa_alsa_strerror(err));
345 return -1;
346 }
347
348 u->first = TRUE;
349 u->since_start = 0;
350 return 0;
351 }
352
/* Given how many bytes are writable in the hw buffer (n_bytes), return
 * how many bytes are still queued for playback. A return of 0 means we
 * (very likely) underran; in that case the watermark/latency is bumped
 * via adjust_after_underrun(), unless this is the first iteration after
 * start/recovery or right after a rewind. */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
    size_t left_to_play;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size) {
        left_to_play = u->hwbuf_size - n_bytes;

#ifdef DEBUG_TIMING
        pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    } else {
        /* More writable space than the buffer holds: we wrapped, i.e.
         * the device ran dry. */
        left_to_play = 0;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (!u->first && !u->after_rewind) {

            if (pa_log_ratelimit())
                pa_log_info("Underrun!");

            if (u->use_tsched)
                adjust_after_underrun(u);
        }
    }

    return left_to_play;
}
387
388 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
389 pa_bool_t work_done = TRUE;
390 pa_usec_t max_sleep_usec = 0, process_usec = 0;
391 size_t left_to_play;
392 unsigned j = 0;
393
394 pa_assert(u);
395 pa_sink_assert_ref(u->sink);
396
397 if (u->use_tsched)
398 hw_sleep_time(u, &max_sleep_usec, &process_usec);
399
400 for (;;) {
401 snd_pcm_sframes_t n;
402 size_t n_bytes;
403 int r;
404
405 /* First we determine how many samples are missing to fill the
406 * buffer up to 100% */
407
408 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
409
410 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
411 continue;
412
413 return r;
414 }
415
416 n_bytes = (size_t) n * u->frame_size;
417
418 #ifdef DEBUG_TIMING
419 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
420 #endif
421
422 left_to_play = check_left_to_play(u, n_bytes);
423
424 if (u->use_tsched)
425
426 /* We won't fill up the playback buffer before at least
427 * half the sleep time is over because otherwise we might
428 * ask for more data from the clients then they expect. We
429 * need to guarantee that clients only have to keep around
430 * a single hw buffer length. */
431
432 if (!polled &&
433 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
434 #ifdef DEBUG_TIMING
435 pa_log_debug("Not filling up, because too early.");
436 #endif
437 break;
438 }
439
440 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
441
442 if (polled)
443 PA_ONCE_BEGIN {
444 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
445 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
446 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
447 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
448 pa_strnull(dn));
449 pa_xfree(dn);
450 } PA_ONCE_END;
451
452 #ifdef DEBUG_TIMING
453 pa_log_debug("Not filling up, because not necessary.");
454 #endif
455 break;
456 }
457
458
459 if (++j > 10) {
460 #ifdef DEBUG_TIMING
461 pa_log_debug("Not filling up, because already too many iterations.");
462 #endif
463
464 break;
465 }
466
467 n_bytes -= u->hwbuf_unused;
468 polled = FALSE;
469
470 #ifdef DEBUG_TIMING
471 pa_log_debug("Filling up");
472 #endif
473
474 for (;;) {
475 pa_memchunk chunk;
476 void *p;
477 int err;
478 const snd_pcm_channel_area_t *areas;
479 snd_pcm_uframes_t offset, frames;
480 snd_pcm_sframes_t sframes;
481
482 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
483 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
484
485 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
486
487 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
488 continue;
489
490 return r;
491 }
492
493 /* Make sure that if these memblocks need to be copied they will fit into one slot */
494 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
495 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
496
497 /* Check these are multiples of 8 bit */
498 pa_assert((areas[0].first & 7) == 0);
499 pa_assert((areas[0].step & 7)== 0);
500
501 /* We assume a single interleaved memory buffer */
502 pa_assert((areas[0].first >> 3) == 0);
503 pa_assert((areas[0].step >> 3) == u->frame_size);
504
505 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
506
507 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
508 chunk.length = pa_memblock_get_length(chunk.memblock);
509 chunk.index = 0;
510
511 pa_sink_render_into_full(u->sink, &chunk);
512 pa_memblock_unref_fixed(chunk.memblock);
513
514 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
515
516 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
517 continue;
518
519 return r;
520 }
521
522 work_done = TRUE;
523
524 u->write_count += frames * u->frame_size;
525 u->since_start += frames * u->frame_size;
526
527 #ifdef DEBUG_TIMING
528 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
529 #endif
530
531 if ((size_t) frames * u->frame_size >= n_bytes)
532 break;
533
534 n_bytes -= (size_t) frames * u->frame_size;
535 }
536 }
537
538 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
539
540 if (*sleep_usec > process_usec)
541 *sleep_usec -= process_usec;
542 else
543 *sleep_usec = 0;
544
545 return work_done ? 1 : 0;
546 }
547
/* Fill the hw buffer using snd_pcm_writei() (non-mmap fallback path).
 *
 * Returns 1 if any data was written, 0 if not, negative on a fatal
 * error. On success *sleep_usec is set to how long the caller may sleep
 * before calling again. 'polled' is TRUE when we got here because
 * poll() reported POLLOUT. Partially consumed render chunks are kept
 * across calls in u->memchunk. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;

        /* How much room is there in the hw buffer right now? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* Spurious POLLOUT wakeup: complain once per driver. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Safety valve against busy-looping in this outer loop. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Refill our staging chunk if it has been fully consumed. */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            /* Never write more than the device can take right now. */
            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            pa_assert(frames != 0);

            if (PA_UNLIKELY(frames < 0)) {

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            /* Advance the staging chunk past what the device accepted. */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    /* Sleep until the watermark is reached, minus processing headroom. */
    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

    if (*sleep_usec > process_usec)
        *sleep_usec -= process_usec;
    else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
675
/* Feed a fresh (system time, playback time) sample pair into the time
 * smoother, based on the PCM delay and the driver's htstamp (falling
 * back to the rtclock when no timestamp is available). */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Playback position = bytes written minus what is still queued.
     * NOTE(review): 'delay' is in frames, so this arguably should be
     * delay * frame_size via pa_bytes_per_frame semantics -- the
     * multiplication by u->frame_size converts frames to bytes here. */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_usec();

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);
}
716
717 static pa_usec_t sink_get_latency(struct userdata *u) {
718 pa_usec_t r;
719 int64_t delay;
720 pa_usec_t now1, now2;
721
722 pa_assert(u);
723
724 now1 = pa_rtclock_usec();
725 now2 = pa_smoother_get(u->smoother, now1);
726
727 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
728
729 r = delay >= 0 ? (pa_usec_t) delay : 0;
730
731 if (u->memchunk.memblock)
732 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
733
734 return r;
735 }
736
737 static int build_pollfd(struct userdata *u) {
738 pa_assert(u);
739 pa_assert(u->pcm_handle);
740
741 if (u->alsa_rtpoll_item)
742 pa_rtpoll_item_free(u->alsa_rtpoll_item);
743
744 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
745 return -1;
746
747 return 0;
748 }
749
/* Called from IO context */
/* Suspend the sink: pause the clock smoother, close the PCM device
 * (freeing the hardware for other users) and drop its poll item.
 * unsuspend() reopens everything later. Always returns 0. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_usec());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}
771
/* Called from IO context */
/* Recalculate hwbuf_unused, the sleep/wakeup margins and the watermark
 * from the currently requested latency, then push the resulting
 * avail_min to ALSA's software parameters and update the sink's max
 * request size. Returns 0 on success, negative ALSA error otherwise. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if noone asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused. */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at last one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* With timer scheduling, only wake up once the sleep period
         * would be over anyway. */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);

    return 0;
}
827
/* Called from IO context */
/* Resume from suspend: reopen the PCM device and restore the exact
 * configuration we had before (sample spec, fragment layout, access
 * mode). Any mismatch is treated as failure so the sink does not
 * silently continue with different parameters. Returns 0/-1; on
 * failure the PCM handle is closed again. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Ask for exactly the old configuration; pa_alsa_set_hw_params()
     * may adjust these, which we detect below. */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Restart bookkeeping as if freshly started. */
    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
899
/* Called from IO context */
/* Message handler for the sink's message object: answers latency
 * queries and performs suspend/resume on state changes, then delegates
 * everything else to the generic pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no device, so report 0 latency. */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: just hook up polling. */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* Waking up from suspend: reopen the device. */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
955
956 /* Called from main context */
957 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
958 pa_sink_state_t old_state;
959 struct userdata *u;
960
961 pa_sink_assert_ref(s);
962 pa_assert_se(u = s->userdata);
963
964 old_state = pa_sink_get_state(u->sink);
965
966 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
967 reserve_done(u);
968 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
969 if (reserve_init(u, u->device_name) < 0)
970 return -1;
971
972 return 0;
973 }
974
975 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
976 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
977
978 pa_assert(u);
979 pa_assert(u->mixer_handle);
980
981 if (mask == SND_CTL_EVENT_MASK_REMOVE)
982 return 0;
983
984 if (mask & SND_CTL_EVENT_MASK_VALUE) {
985 pa_sink_get_volume(u->sink, TRUE, FALSE);
986 pa_sink_get_mute(u->sink, TRUE);
987 }
988
989 return 0;
990 }
991
/* Read the current hardware volume from the mixer and mirror it into
 * the sink's virtual volume. If the mixer supports dB, a hardware-side
 * change also resets our software volume to neutral, since the user
 * presumably wants exactly what the hardware now says. */
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    /* Nothing changed since last time -- avoid pointless updates. */
    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->virtual_volume = u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume reset;

        /* Hmm, so the hardware volume changed, let's reset our software volume */
        pa_cvolume_reset(&reset, s->sample_spec.channels);
        pa_sink_set_soft_volume(s, &reset);
    }
}
1022
/* Apply the sink's virtual volume to the hardware mixer. With dB
 * support, any remainder the hardware cannot express is made up in
 * software (soft_volume); without dB support, the virtual volume is
 * snapped to whatever the hardware actually accepted. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->virtual_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    /* 'r' now holds what the hardware actually applied. */
    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&s->soft_volume, &s->virtual_volume, &u->hardware_volume);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->virtual_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->soft_volume));

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->virtual_volume = r;
    }
}
1061
1062 static void sink_get_mute_cb(pa_sink *s) {
1063 struct userdata *u = s->userdata;
1064 pa_bool_t b;
1065
1066 pa_assert(u);
1067 pa_assert(u->mixer_path);
1068 pa_assert(u->mixer_handle);
1069
1070 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1071 return;
1072
1073 s->muted = b;
1074 }
1075
1076 static void sink_set_mute_cb(pa_sink *s) {
1077 struct userdata *u = s->userdata;
1078
1079 pa_assert(u);
1080 pa_assert(u->mixer_path);
1081 pa_assert(u->mixer_handle);
1082
1083 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1084 }
1085
/* Switch the sink to a different port: select the corresponding mixer
 * path, recompute base volume / volume step count from the path's dB
 * capabilities, apply the port's setting, and re-sync mute and volume
 * with the hardware. Always returns 0. */
static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
        /* With dB info, scale so that the path's max_dB maps to 0 dB. */
        s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
        s->n_volume_steps = PA_VOLUME_NORM+1;

        if (u->mixer_path->max_dB > 0.0)
            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
        else
            pa_log_info("No particular base volume set, fixing to 0 dB");
    } else {
        /* No dB info: expose the raw hardware step range. */
        s->base_volume = PA_VOLUME_NORM;
        s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
    }

    if (data->setting)
        pa_alsa_setting_select(data->setting, u->mixer_handle);

    /* Re-apply current mute/volume through the newly selected path. */
    if (s->set_mute)
        s->set_mute(s);
    if (s->set_volume)
        s->set_volume(s);

    return 0;
}
1122
/* The requested latency changed: recompute the software parameters and,
 * if less of the hw buffer is now in use, force a full rewind so that
 * subsequent rewinds are measured against the new fill level. No-op
 * while suspended. */
static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    size_t before;
    pa_assert(u);

    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
    buffer then before. If so, we need to make sure that subsequent
    rewinds are relative to the new maximum fill level and not to the
    current fill level. Thus, let's do a full rewind once, to clear
    things up. */

    if (u->hwbuf_unused > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}
1145
1146 static int process_rewind(struct userdata *u) {
1147 snd_pcm_sframes_t unused;
1148 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1149 pa_assert(u);
1150
1151 /* Figure out how much we shall rewind and reset the counter */
1152 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1153
1154 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1155
1156 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1157 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1158 return -1;
1159 }
1160
1161 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1162
1163 if (u->hwbuf_size > unused_nbytes)
1164 limit_nbytes = u->hwbuf_size - unused_nbytes;
1165 else
1166 limit_nbytes = 0;
1167
1168 if (rewind_nbytes > limit_nbytes)
1169 rewind_nbytes = limit_nbytes;
1170
1171 if (rewind_nbytes > 0) {
1172 snd_pcm_sframes_t in_frames, out_frames;
1173
1174 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1175
1176 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1177 pa_log_debug("before: %lu", (unsigned long) in_frames);
1178 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1179 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1180 return -1;
1181 }
1182 pa_log_debug("after: %lu", (unsigned long) out_frames);
1183
1184 rewind_nbytes = (size_t) out_frames * u->frame_size;
1185
1186 if (rewind_nbytes <= 0)
1187 pa_log_info("Tried rewind, but was apparently not possible.");
1188 else {
1189 u->write_count -= out_frames * u->frame_size;
1190 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1191 pa_sink_process_rewind(u->sink, rewind_nbytes);
1192
1193 u->after_rewind = TRUE;
1194 return 0;
1195 }
1196 } else
1197 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1198
1199 pa_sink_process_rewind(u->sink, 0);
1200 return 0;
1201 }
1202
/* The real-time IO thread of the sink.
 *
 * Each loop iteration: (1) if the sink is opened, process any pending
 * rewind, render/write data via mmap_write() or unix_write(), and — in
 * timer-scheduling mode — program the rtpoll timer for the next wakeup;
 * (2) sleep in pa_rtpoll_run(); (3) translate poll revents through ALSA
 * and recover from error conditions (underruns etc.). Runs until the
 * rtpoll signals a regular shutdown (ret == 0) or a fatal error occurs. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            /* revents & POLLOUT tells the writer whether ALSA woke us */
            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First data written: start the stream and the clock
                 * smoother together */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec(), TRUE);
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* ret == 0 is the regular shutdown request */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLOUT indicates an error condition;
             * try to recover and restart the stream from scratch */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1335
1336 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1337 const char *n;
1338 char *t;
1339
1340 pa_assert(data);
1341 pa_assert(ma);
1342 pa_assert(device_name);
1343
1344 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1345 pa_sink_new_data_set_name(data, n);
1346 data->namereg_fail = TRUE;
1347 return;
1348 }
1349
1350 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1351 data->namereg_fail = TRUE;
1352 else {
1353 n = device_id ? device_id : device_name;
1354 data->namereg_fail = FALSE;
1355 }
1356
1357 if (mapping)
1358 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1359 else
1360 t = pa_sprintf_malloc("alsa_output.%s", n);
1361
1362 pa_sink_new_data_set_name(data, t);
1363 pa_xfree(t);
1364 }
1365
1366 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1367
1368 if (!mapping && !element)
1369 return;
1370
1371 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1372 pa_log_info("Failed to find a working mixer device.");
1373 return;
1374 }
1375
1376 if (element) {
1377
1378 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1379 goto fail;
1380
1381 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1382 goto fail;
1383
1384 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1385 pa_alsa_path_dump(u->mixer_path);
1386 } else {
1387
1388 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1389 goto fail;
1390
1391 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1392
1393 pa_log_debug("Probed mixer paths:");
1394 pa_alsa_path_set_dump(u->mixer_path_set);
1395 }
1396
1397 return;
1398
1399 fail:
1400
1401 if (u->mixer_path_set) {
1402 pa_alsa_path_set_free(u->mixer_path_set);
1403 u->mixer_path_set = NULL;
1404 } else if (u->mixer_path) {
1405 pa_alsa_path_free(u->mixer_path);
1406 u->mixer_path = NULL;
1407 }
1408
1409 if (u->mixer_handle) {
1410 snd_mixer_close(u->mixer_handle);
1411 u->mixer_handle = NULL;
1412 }
1413 }
1414
/* Hook up hardware volume/mute handling for the sink, based on the mixer
 * path(s) probed earlier by find_mixer(). Selects the active path, derives
 * base volume and step count, installs the volume/mute callbacks and starts
 * monitoring the mixer fds. Returns 0 on success (including the no-mixer
 * case), -1 only if fd monitoring could not be set up. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* No mixer found: the sink stays on software volume/mute */
    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        /* No active port: fall back to the first probed path, if any */
        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* base_volume compensates an amplifying mixer so that
             * PA_VOLUME_NORM corresponds to 0 dB */
            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            /* No dB information: expose the raw hardware step range */
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->sink->get_volume = sink_get_volume_cb;
        u->sink->set_volume = sink_set_volume_cb;

        u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    /* Watch the mixer fds on the main loop so external volume changes
     * (e.g. from alsamixer) reach us via mixer_callback */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}
1502
/* Create and publish a new ALSA playback sink.
 *
 * Parses module arguments, opens the PCM device (by mapping, device_id or
 * device string), probes the mixer, constructs the pa_sink, spawns the IO
 * thread and finally puts the sink into the core. Returns the new sink,
 * or NULL on any failure — in which case all partially acquired resources
 * are released via userdata_free(). */
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    /* --- Parse sample spec, channel map and buffer metrics --- */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    /* Keep the original spec around: ALSA may tweak ss later, but the
     * tsched watermark is interpreted relative to what was requested */
    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Timer-based scheduling needs hrtimers to be usable at all */
    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }

    /* --- Allocate and pre-initialize the userdata --- */
    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            DEFAULT_TSCHED_BUFFER_USEC*2,
            DEFAULT_TSCHED_BUFFER_USEC*2,
            TRUE,
            TRUE,
            5,
            pa_rtclock_usec(),
            TRUE);

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    /* Register with the device reservation service before touching it */
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    /* b/d are in-out flags: the open helpers clear them if mmap resp.
     * tsched turned out to be unavailable on the device */
    b = use_mmap;
    d = use_tsched;

    /* --- Open the PCM device, by mapping, device_id or device string --- */
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, mapping)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))

            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    /* --- Downgrade mmap/tsched if the device couldn't provide them --- */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
        pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    /* --- Build the pa_sink object and its property list --- */
    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    /* --- Final buffer geometry and latency configuration --- */
    u->frame_size = frame_size;
    u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    /* Re-express the watermark in the (possibly adjusted) final spec */
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    pa_sink_set_max_rewind(u->sink, u->hwbuf_size);

    if (u->use_tsched) {
        u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    /* NOTE(review): data.volume_is_set/muted_is_set are read after
     * pa_sink_new_data_done(&data); assumes done() leaves these plain
     * flag fields intact — confirm against pa_sink_new_data_done(). */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
1808
/* Tear down a struct userdata and everything it owns.
 * All fields are NULL-checked, so this is also safe on a partially
 * initialized userdata (the fail path of pa_alsa_sink_new() relies on
 * that). Order matters: the sink is unlinked and the IO thread stopped
 * before any of the resources the thread uses are released. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Detach the sink from the core so no new work is dispatched to it */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Synchronously shut down the IO thread */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* Discard pending samples, then close the PCM device */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* Free either the path set or the stand-alone path — never both,
     * mirroring how find_mixer() set them up */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
1860
1861 void pa_alsa_sink_free(pa_sink *s) {
1862 struct userdata *u;
1863
1864 pa_sink_assert_ref(s);
1865 pa_assert_se(u = s->userdata);
1866
1867 userdata_free(u);
1868 }