/* pulseaudio: src/modules/alsa/alsa-sink.c
 * (commit: alsa: when printing warning about bogus data from alsa include snd_pcm_dump()) */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
38 #include <pulse/i18n.h>
39
40 #include <pulsecore/core.h>
41 #include <pulsecore/module.h>
42 #include <pulsecore/memchunk.h>
43 #include <pulsecore/sink.h>
44 #include <pulsecore/modargs.h>
45 #include <pulsecore/core-util.h>
46 #include <pulsecore/sample-util.h>
47 #include <pulsecore/log.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/thread.h>
50 #include <pulsecore/core-error.h>
51 #include <pulsecore/thread-mq.h>
52 #include <pulsecore/rtpoll.h>
53 #include <pulsecore/rtclock.h>
54 #include <pulsecore/time-smoother.h>
55
56 #include <modules/reserve-wrap.h>
57
58 #include "alsa-util.h"
59 #include "alsa-sink.h"
60
61 /* #define DEBUG_TIMING */
62
63 #define DEFAULT_DEVICE "default"
64 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
65 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
66 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
67 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
68 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
69
70 struct userdata {
71 pa_core *core;
72 pa_module *module;
73 pa_sink *sink;
74
75 pa_thread *thread;
76 pa_thread_mq thread_mq;
77 pa_rtpoll *rtpoll;
78
79 snd_pcm_t *pcm_handle;
80
81 pa_alsa_fdlist *mixer_fdl;
82 snd_mixer_t *mixer_handle;
83 snd_mixer_elem_t *mixer_elem;
84 long hw_volume_max, hw_volume_min;
85 long hw_dB_max, hw_dB_min;
86 pa_bool_t hw_dB_supported:1;
87 pa_bool_t mixer_seperate_channels:1;
88 pa_cvolume hardware_volume;
89
90 size_t
91 frame_size,
92 fragment_size,
93 hwbuf_size,
94 tsched_watermark,
95 hwbuf_unused,
96 min_sleep,
97 min_wakeup,
98 watermark_step;
99
100 unsigned nfragments;
101 pa_memchunk memchunk;
102
103 char *device_name;
104
105 pa_bool_t use_mmap:1, use_tsched:1;
106
107 pa_bool_t first, after_rewind;
108
109 pa_rtpoll_item *alsa_rtpoll_item;
110
111 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
112
113 pa_smoother *smoother;
114 uint64_t write_count;
115 uint64_t since_start;
116
117 pa_reserve_wrapper *reserve;
118 pa_hook_slot *reserve_slot;
119 };
120
121 static void userdata_free(struct userdata *u);
122
123 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
124 pa_assert(r);
125 pa_assert(u);
126
127 if (pa_sink_suspend(u->sink, TRUE) < 0)
128 return PA_HOOK_CANCEL;
129
130 return PA_HOOK_OK;
131 }
132
133 static void reserve_done(struct userdata *u) {
134 pa_assert(u);
135
136 if (u->reserve_slot) {
137 pa_hook_slot_free(u->reserve_slot);
138 u->reserve_slot = NULL;
139 }
140
141 if (u->reserve) {
142 pa_reserve_wrapper_unref(u->reserve);
143 u->reserve = NULL;
144 }
145 }
146
147 static void reserve_update(struct userdata *u) {
148 const char *description;
149 pa_assert(u);
150
151 if (!u->sink || !u->reserve)
152 return;
153
154 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
155 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
156 }
157
158 static int reserve_init(struct userdata *u, const char *dname) {
159 char *rname;
160
161 pa_assert(u);
162 pa_assert(dname);
163
164 if (u->reserve)
165 return 0;
166
167 if (pa_in_system_mode())
168 return 0;
169
170 /* We are resuming, try to lock the device */
171 if (!(rname = pa_alsa_get_reserve_name(dname)))
172 return 0;
173
174 u->reserve = pa_reserve_wrapper_get(u->core, rname);
175 pa_xfree(rname);
176
177 if (!(u->reserve))
178 return -1;
179
180 reserve_update(u);
181
182 pa_assert(!u->reserve_slot);
183 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
184
185 return 0;
186 }
187
188 static void fix_min_sleep_wakeup(struct userdata *u) {
189 size_t max_use, max_use_2;
190
191 pa_assert(u);
192
193 max_use = u->hwbuf_size - u->hwbuf_unused;
194 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
195
196 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
197 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
198
199 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
200 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
201 }
202
203 static void fix_tsched_watermark(struct userdata *u) {
204 size_t max_use;
205 pa_assert(u);
206
207 max_use = u->hwbuf_size - u->hwbuf_unused;
208
209 if (u->tsched_watermark > max_use - u->min_sleep)
210 u->tsched_watermark = max_use - u->min_sleep;
211
212 if (u->tsched_watermark < u->min_wakeup)
213 u->tsched_watermark = u->min_wakeup;
214 }
215
/* React to a detected underrun when timer-based scheduling is in use:
 * first try to raise the wakeup watermark; if it is already maxed out,
 * raise the sink's minimal latency instead. */
static void adjust_after_underrun(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    /* Grow by at most one step, but never more than double it. */
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_notice("Increasing wakeup watermark to %0.2f ms",
                      (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
    /* Never exceed the configured maximum latency. */
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_notice("Increasing minimal latency to %0.2f ms",
                      (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
        return;
    }

    /* When we reach this point both knobs are at their limits and there is
     * nothing more we can do about the underruns. */
}
249
/* Split the target buffer time into how long we may sleep (*sleep_usec)
 * and how much headroom we keep for processing (*process_usec), based on
 * the requested latency and the current watermark. */
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    /* No latency requested: fall back to the full hardware buffer time. */
    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    /* The watermark must not exceed the buffer time itself; cap it at half. */
    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
278
279 static int try_recover(struct userdata *u, const char *call, int err) {
280 pa_assert(u);
281 pa_assert(call);
282 pa_assert(err < 0);
283
284 pa_log_debug("%s: %s", call, snd_strerror(err));
285
286 pa_assert(err != -EAGAIN);
287
288 if (err == -EPIPE)
289 pa_log_debug("%s: Buffer underrun!", call);
290
291 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
292 pa_log("%s: %s", call, snd_strerror(err));
293 return -1;
294 }
295
296 u->first = TRUE;
297 u->since_start = 0;
298 return 0;
299 }
300
/* Given that n_bytes of the hardware buffer are currently writable, return
 * how many bytes are still queued for playback. Detects underruns (more
 * writable space than the buffer holds) and reacts to them. */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
    size_t left_to_play;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size) {
        left_to_play = u->hwbuf_size - n_bytes;

#ifdef DEBUG_TIMING
        pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    } else {
        /* More space reported than the buffer holds: we ran dry. */
        left_to_play = 0;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        /* Right after startup or a rewind an underrun is expected, so only
         * report and adjust for it during normal operation. */
        if (!u->first && !u->after_rewind) {

            if (pa_log_ratelimit())
                pa_log_info("Underrun!");

            if (u->use_tsched)
                adjust_after_underrun(u);
        }
    }

    return left_to_play;
}
335
336 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
337 pa_bool_t work_done = TRUE;
338 pa_usec_t max_sleep_usec = 0, process_usec = 0;
339 size_t left_to_play;
340 unsigned j = 0;
341
342 pa_assert(u);
343 pa_sink_assert_ref(u->sink);
344
345 if (u->use_tsched)
346 hw_sleep_time(u, &max_sleep_usec, &process_usec);
347
348 for (;;) {
349 snd_pcm_sframes_t n;
350 size_t n_bytes;
351 int r;
352
353 /* First we determine how many samples are missing to fill the
354 * buffer up to 100% */
355
356 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
357
358 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
359 continue;
360
361 return r;
362 }
363
364 n_bytes = (size_t) n * u->frame_size;
365
366 #ifdef DEBUG_TIMING
367 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
368 #endif
369
370 left_to_play = check_left_to_play(u, n_bytes);
371
372 if (u->use_tsched)
373
374 /* We won't fill up the playback buffer before at least
375 * half the sleep time is over because otherwise we might
376 * ask for more data from the clients then they expect. We
377 * need to guarantee that clients only have to keep around
378 * a single hw buffer length. */
379
380 if (!polled &&
381 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
382 #ifdef DEBUG_TIMING
383 pa_log_debug("Not filling up, because too early.");
384 #endif
385 break;
386 }
387
388 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
389
390 if (polled)
391 PA_ONCE_BEGIN {
392 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
393 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
394 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
395 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
396 pa_strnull(dn));
397 pa_xfree(dn);
398 } PA_ONCE_END;
399
400 #ifdef DEBUG_TIMING
401 pa_log_debug("Not filling up, because not necessary.");
402 #endif
403 break;
404 }
405
406
407 if (++j > 10) {
408 #ifdef DEBUG_TIMING
409 pa_log_debug("Not filling up, because already too many iterations.");
410 #endif
411
412 break;
413 }
414
415 n_bytes -= u->hwbuf_unused;
416 polled = FALSE;
417
418 #ifdef DEBUG_TIMING
419 pa_log_debug("Filling up");
420 #endif
421
422 for (;;) {
423 pa_memchunk chunk;
424 void *p;
425 int err;
426 const snd_pcm_channel_area_t *areas;
427 snd_pcm_uframes_t offset, frames;
428 snd_pcm_sframes_t sframes;
429
430 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
431 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
432
433 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
434
435 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
436 continue;
437
438 return r;
439 }
440
441 /* Make sure that if these memblocks need to be copied they will fit into one slot */
442 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
443 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
444
445 /* Check these are multiples of 8 bit */
446 pa_assert((areas[0].first & 7) == 0);
447 pa_assert((areas[0].step & 7)== 0);
448
449 /* We assume a single interleaved memory buffer */
450 pa_assert((areas[0].first >> 3) == 0);
451 pa_assert((areas[0].step >> 3) == u->frame_size);
452
453 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
454
455 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
456 chunk.length = pa_memblock_get_length(chunk.memblock);
457 chunk.index = 0;
458
459 pa_sink_render_into_full(u->sink, &chunk);
460 pa_memblock_unref_fixed(chunk.memblock);
461
462 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
463
464 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
465 continue;
466
467 return r;
468 }
469
470 work_done = TRUE;
471
472 u->write_count += frames * u->frame_size;
473 u->since_start += frames * u->frame_size;
474
475 #ifdef DEBUG_TIMING
476 pa_log_debug("Wrote %lu bytes", (unsigned long) (frames * u->frame_size));
477 #endif
478
479 if ((size_t) frames * u->frame_size >= n_bytes)
480 break;
481
482 n_bytes -= (size_t) frames * u->frame_size;
483 }
484 }
485
486 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
487 return work_done ? 1 : 0;
488 }
489
/* Fill the hardware buffer with snd_pcm_writei() (non-mmap fallback path).
 * Returns 1 if any audio was written, 0 if there was nothing to do, or a
 * negative value when an unrecoverable PCM error occurred. On success
 * *sleep_usec is set to how long we may sleep before the next fill-up.
 * 'polled' indicates we were woken by poll() rather than by the timer. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;

        /* Determine how much of the hw buffer is currently writable. */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* poll() woke us up but there is nothing to write -- that's a
             * driver bug worth reporting (once). */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Safety valve against spinning in this loop forever. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Render a fresh chunk only when the previous one is used up. */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            /* Never write more than the device can currently take. */
            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            pa_assert(frames != 0);

            if (PA_UNLIKELY(frames < 0)) {

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            /* Advance the chunk past what was consumed; drop it when empty. */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done ? 1 : 0;
}
611
/* Feed a fresh (wallclock time, playback position) sample pair into the
 * time smoother so that latency interpolation stays accurate. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", snd_strerror(err));
        return;
    }

    /* Prefer the driver-provided timestamp when available. */
    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", snd_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Bytes written so far minus what still sits in the hw buffer gives the
     * byte position that is currently audible. */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_usec();

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);
}
652
653 static pa_usec_t sink_get_latency(struct userdata *u) {
654 pa_usec_t r;
655 int64_t delay;
656 pa_usec_t now1, now2;
657
658 pa_assert(u);
659
660 now1 = pa_rtclock_usec();
661 now2 = pa_smoother_get(u->smoother, now1);
662
663 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
664
665 r = delay >= 0 ? (pa_usec_t) delay : 0;
666
667 if (u->memchunk.memblock)
668 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
669
670 return r;
671 }
672
673 static int build_pollfd(struct userdata *u) {
674 pa_assert(u);
675 pa_assert(u->pcm_handle);
676
677 if (u->alsa_rtpoll_item)
678 pa_rtpoll_item_free(u->alsa_rtpoll_item);
679
680 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
681 return -1;
682
683 return 0;
684 }
685
686 /* Called from IO context */
687 static int suspend(struct userdata *u) {
688 pa_assert(u);
689 pa_assert(u->pcm_handle);
690
691 pa_smoother_pause(u->smoother, pa_rtclock_usec());
692
693 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
694 * take awfully long with our long buffer sizes today. */
695 snd_pcm_close(u->pcm_handle);
696 u->pcm_handle = NULL;
697
698 if (u->alsa_rtpoll_item) {
699 pa_rtpoll_item_free(u->alsa_rtpoll_item);
700 u->alsa_rtpoll_item = NULL;
701 }
702
703 pa_log_info("Device suspended...");
704
705 return 0;
706 }
707
/* Called from IO context */
/* Recompute hwbuf_unused and avail_min from the currently requested
 * latency and push the resulting software parameters to ALSA.
 * Returns 0 on success or a negative ALSA error code. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if noone asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused. */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        /* The sleep/wakeup margins and the watermark depend on hwbuf_unused. */
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at last one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    /* With timer-based scheduling, delay the poll() wakeup by the sleep
     * time so the timer, not the fd, drives us. */
    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", snd_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);

    return 0;
}
763
/* Called from IO context */
/* Reopen and reconfigure the PCM device after a suspend. Resume only
 * succeeds if the device comes back with exactly the same access mode,
 * sample spec and fragment layout as before; otherwise we fail so the
 * caller can tear the sink down. Returns 0 on success, -1 on failure. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Drop cached ALSA configuration in case it changed while suspended. */
    snd_config_update_free_global();
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
        goto fail;
    }

    /* Request the same configuration we had before suspending. */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Restart playback accounting from scratch. */
    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
836
/* Called from IO context */
/* Message handler for the sink: answers latency queries and performs the
 * suspend/resume transitions on state changes, then delegates everything
 * else to the generic pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no pcm handle; report zero latency. */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: set up polling. */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* Coming back from suspend: reopen the device. */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    /* Nothing to do for these states. */
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
892
893 /* Called from main context */
894 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
895 pa_sink_state_t old_state;
896 struct userdata *u;
897
898 pa_sink_assert_ref(s);
899 pa_assert_se(u = s->userdata);
900
901 old_state = pa_sink_get_state(u->sink);
902
903 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
904 reserve_done(u);
905 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
906 if (reserve_init(u, u->device_name) < 0)
907 return -1;
908
909 return 0;
910 }
911
912 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
913 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
914
915 pa_assert(u);
916 pa_assert(u->mixer_handle);
917
918 if (mask == SND_CTL_EVENT_MASK_REMOVE)
919 return 0;
920
921 if (mask & SND_CTL_EVENT_MASK_VALUE) {
922 pa_sink_get_volume(u->sink, TRUE);
923 pa_sink_get_mute(u->sink, TRUE);
924 }
925
926 return 0;
927 }
928
929 static pa_volume_t from_alsa_volume(struct userdata *u, long alsa_vol) {
930
931 return (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) /
932 (double) (u->hw_volume_max - u->hw_volume_min));
933 }
934
935 static long to_alsa_volume(struct userdata *u, pa_volume_t vol) {
936 long alsa_vol;
937
938 alsa_vol = (long) round(((double) vol * (double) (u->hw_volume_max - u->hw_volume_min))
939 / PA_VOLUME_NORM) + u->hw_volume_min;
940
941 return PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);
942 }
943
/* Read the current hardware volume from the mixer into the sink. Uses the
 * dB API when supported, raw mixer values otherwise; handles both
 * per-channel and mono-mapped mixer elements. */
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        /* One mixer channel per sink channel. */
        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;

            if (u->hw_dB_supported) {

                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                /* ALSA reports dB*100 relative; rebase on hw_dB_max. */
                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);
            } else {

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        /* Single mixer channel controls all sink channels. */
        long alsa_vol;

        if (u->hw_dB_supported) {

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    if (!pa_cvolume_equal(&u->hardware_volume, &r)) {

        s->virtual_volume = u->hardware_volume = r;

        if (u->hw_dB_supported) {
            pa_cvolume reset;

            /* Hmm, so the hardware volume changed, let's reset our software volume */
            pa_cvolume_reset(&reset, s->sample_spec.channels);
            pa_sink_set_soft_volume(s, &reset);
        }
    }

    return;

fail:
    pa_log_error("Unable to read volume: %s", snd_strerror(err));
}
1023
/* Push the sink's requested volume to the hardware mixer, then read back
 * what the hardware actually granted (its steps are coarse). With dB
 * support the residual is compensated in software; without it we simply
 * report the granted value back as the sink volume. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        /* One mixer channel per sink channel. */
        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;
            pa_volume_t vol;

            vol = s->virtual_volume.values[i];

            if (u->hw_dB_supported) {

                /* ALSA's dB API works in dB*100, rebased on hw_dB_max. */
                alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
                alsa_vol += u->hw_dB_max;
                alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

                if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, 1)) < 0)
                    goto fail;

                /* Read back what the hardware actually set. */
                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);

            } else {
                alsa_vol = to_alsa_volume(u, vol);

                if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
                    goto fail;

                /* Read back what the hardware actually set. */
                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        /* Single mixer channel: drive it with the loudest channel's volume. */
        pa_volume_t vol;
        long alsa_vol;

        vol = pa_cvolume_max(&s->virtual_volume);

        if (u->hw_dB_supported) {
            alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
            alsa_vol += u->hw_dB_max;
            alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

            if ((err = snd_mixer_selem_set_playback_dB_all(u->mixer_elem, alsa_vol, 1)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {
            alsa_vol = to_alsa_volume(u, vol);

            if ((err = snd_mixer_selem_set_playback_volume_all(u->mixer_elem, alsa_vol)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    u->hardware_volume = r;

    if (u->hw_dB_supported) {
        char t[PA_CVOLUME_SNPRINT_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&s->soft_volume, &s->virtual_volume, &u->hardware_volume);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->virtual_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->soft_volume));

    } else

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->virtual_volume = r;

    return;

fail:
    pa_log_error("Unable to set volume: %s", snd_strerror(err));
}
1134
1135 static void sink_get_mute_cb(pa_sink *s) {
1136 struct userdata *u = s->userdata;
1137 int err, sw;
1138
1139 pa_assert(u);
1140 pa_assert(u->mixer_elem);
1141
1142 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
1143 pa_log_error("Unable to get switch: %s", snd_strerror(err));
1144 return;
1145 }
1146
1147 s->muted = !sw;
1148 }
1149
1150 static void sink_set_mute_cb(pa_sink *s) {
1151 struct userdata *u = s->userdata;
1152 int err;
1153
1154 pa_assert(u);
1155 pa_assert(u->mixer_elem);
1156
1157 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
1158 pa_log_error("Unable to set switch: %s", snd_strerror(err));
1159 return;
1160 }
1161 }
1162
1163 static void sink_update_requested_latency_cb(pa_sink *s) {
1164 struct userdata *u = s->userdata;
1165 size_t before;
1166 pa_assert(u);
1167
1168 if (!u->pcm_handle)
1169 return;
1170
1171 before = u->hwbuf_unused;
1172 update_sw_params(u);
1173
1174 /* Let's check whether we now use only a smaller part of the
1175 buffer then before. If so, we need to make sure that subsequent
1176 rewinds are relative to the new maximum fill level and not to the
1177 current fill level. Thus, let's do a full rewind once, to clear
1178 things up. */
1179
1180 if (u->hwbuf_unused > before) {
1181 pa_log_debug("Requesting rewind due to latency change.");
1182 pa_sink_request_rewind(s, (size_t) -1);
1183 }
1184 }
1185
1186 static int process_rewind(struct userdata *u) {
1187 snd_pcm_sframes_t unused;
1188 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1189 pa_assert(u);
1190
1191 /* Figure out how much we shall rewind and reset the counter */
1192 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1193
1194 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1195
1196 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1197 pa_log("snd_pcm_avail() failed: %s", snd_strerror((int) unused));
1198 return -1;
1199 }
1200
1201 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1202
1203 if (u->hwbuf_size > unused_nbytes)
1204 limit_nbytes = u->hwbuf_size - unused_nbytes;
1205 else
1206 limit_nbytes = 0;
1207
1208 if (rewind_nbytes > limit_nbytes)
1209 rewind_nbytes = limit_nbytes;
1210
1211 if (rewind_nbytes > 0) {
1212 snd_pcm_sframes_t in_frames, out_frames;
1213
1214 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1215
1216 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1217 pa_log_debug("before: %lu", (unsigned long) in_frames);
1218 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1219 pa_log("snd_pcm_rewind() failed: %s", snd_strerror((int) out_frames));
1220 return -1;
1221 }
1222 pa_log_debug("after: %lu", (unsigned long) out_frames);
1223
1224 rewind_nbytes = (size_t) out_frames * u->frame_size;
1225
1226 if (rewind_nbytes <= 0)
1227 pa_log_info("Tried rewind, but was apparently not possible.");
1228 else {
1229 u->write_count -= out_frames * u->frame_size;
1230 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1231 pa_sink_process_rewind(u->sink, rewind_nbytes);
1232
1233 u->after_rewind = TRUE;
1234 return 0;
1235 }
1236 } else
1237 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1238
1239 pa_sink_process_rewind(u->sink, 0);
1240 return 0;
1241 }
1242
/* The realtime I/O thread: renders audio into the ALSA buffer,
 * services rewind requests, keeps the timing smoother updated and
 * sleeps on the rtpoll loop until asked to shut down. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            /* Write via the mmap'ed buffer or plain write(), whichever
             * mode was negotiated with the device at open time. */
            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First successful write: actually start the PCM and
                 * resume the clock smoother. */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec(), TRUE);
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* ret == 0 means the shutdown message was received. */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
                goto fail;
            }

            /* Anything besides POLLOUT indicates an error condition;
             * try to recover and restart playback from scratch. */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1376
1377 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name) {
1378 const char *n;
1379 char *t;
1380
1381 pa_assert(data);
1382 pa_assert(ma);
1383 pa_assert(device_name);
1384
1385 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1386 pa_sink_new_data_set_name(data, n);
1387 data->namereg_fail = TRUE;
1388 return;
1389 }
1390
1391 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1392 data->namereg_fail = TRUE;
1393 else {
1394 n = device_id ? device_id : device_name;
1395 data->namereg_fail = FALSE;
1396 }
1397
1398 t = pa_sprintf_malloc("alsa_output.%s", n);
1399 pa_sink_new_data_set_name(data, t);
1400 pa_xfree(t);
1401 }
1402
/* Wire up hardware volume and mute control through the ALSA mixer
 * element found earlier, falling back to software control whenever
 * the element is missing or reports an unusable range. If ignore_dB
 * is set, any dB scale the driver reports is not used.
 * Returns 0 on success, -1 on failure. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* No mixer was found: stick with pure software volume/mute. */
    if (!u->mixer_handle)
        return 0;

    pa_assert(u->mixer_elem);

    if (snd_mixer_selem_has_playback_volume(u->mixer_elem)) {
        pa_bool_t suitable = FALSE;

        if (snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) < 0)
            pa_log_info("Failed to get volume range. Falling back to software volume control.");
        else if (u->hw_volume_min >= u->hw_volume_max)
            pa_log_warn("Your kernel driver is broken: it reports a volume range from %li to %li which makes no sense.", u->hw_volume_min, u->hw_volume_max);
        else {
            pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
            suitable = TRUE;
        }

        if (suitable) {
            if (ignore_dB || snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) < 0)
                pa_log_info("Mixer doesn't support dB information or data is ignored.");
            else {
#ifdef HAVE_VALGRIND_MEMCHECK_H
                /* Mark the dB bounds as initialized for valgrind —
                 * presumably alsa-lib does not always fill them in a
                 * way valgrind can track. */
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_min, sizeof(u->hw_dB_min));
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_max, sizeof(u->hw_dB_max));
#endif

                /* dB values here are in 1/100 dB, hence the /100.0. */
                if (u->hw_dB_min >= u->hw_dB_max)
                    pa_log_warn("Your kernel driver is broken: it reports a volume range from %0.2f dB to %0.2f dB which makes no sense.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
                else {
                    pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
                    u->hw_dB_supported = TRUE;

                    if (u->hw_dB_max > 0) {
                        u->sink->base_volume = pa_sw_volume_from_dB(- (double) u->hw_dB_max/100.0);
                        pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
                    } else
                        pa_log_info("No particular base volume set, fixing to 0 dB");
                }
            }

            /* Fewer than 4 discrete steps without dB data is too
             * coarse to be useful as a hardware volume. */
            if (!u->hw_dB_supported &&
                u->hw_volume_max - u->hw_volume_min < 3) {

                pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
                suitable = FALSE;
            }
        }

        if (suitable) {
            u->mixer_seperate_channels = pa_alsa_calc_mixer_map(u->mixer_elem, &u->sink->channel_map, u->mixer_map, TRUE) >= 0;

            u->sink->get_volume = sink_get_volume_cb;
            u->sink->set_volume = sink_set_volume_cb;
            u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
            pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");

            if (!u->hw_dB_supported)
                u->sink->n_volume_steps = u->hw_volume_max - u->hw_volume_min + 1;
        } else
            pa_log_info("Using software volume control.");
    }

    if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
    } else
        pa_log_info("Using software mute control.");

    /* Watch the mixer fds from the main loop so external volume
     * changes are picked up. */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
    snd_mixer_elem_set_callback_private(u->mixer_elem, u);

    return 0;
}
1487
1488 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, const pa_alsa_profile_info *profile) {
1489
1490 struct userdata *u = NULL;
1491 const char *dev_id = NULL;
1492 pa_sample_spec ss, requested_ss;
1493 pa_channel_map map;
1494 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1495 snd_pcm_uframes_t period_frames, tsched_frames;
1496 size_t frame_size;
1497 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
1498 pa_sink_new_data data;
1499
1500 pa_assert(m);
1501 pa_assert(ma);
1502
1503 ss = m->core->default_sample_spec;
1504 map = m->core->default_channel_map;
1505 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1506 pa_log("Failed to parse sample specification and channel map");
1507 goto fail;
1508 }
1509
1510 requested_ss = ss;
1511 frame_size = pa_frame_size(&ss);
1512
1513 nfrags = m->core->default_n_fragments;
1514 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1515 if (frag_size <= 0)
1516 frag_size = (uint32_t) frame_size;
1517 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1518 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1519
1520 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1521 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1522 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1523 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1524 pa_log("Failed to parse buffer metrics");
1525 goto fail;
1526 }
1527
1528 hwbuf_size = frag_size * nfrags;
1529 period_frames = frag_size/frame_size;
1530 tsched_frames = tsched_size/frame_size;
1531
1532 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1533 pa_log("Failed to parse mmap argument.");
1534 goto fail;
1535 }
1536
1537 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1538 pa_log("Failed to parse tsched argument.");
1539 goto fail;
1540 }
1541
1542 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1543 pa_log("Failed to parse ignore_dB argument.");
1544 goto fail;
1545 }
1546
1547 if (use_tsched && !pa_rtclock_hrtimer()) {
1548 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1549 use_tsched = FALSE;
1550 }
1551
1552 u = pa_xnew0(struct userdata, 1);
1553 u->core = m->core;
1554 u->module = m;
1555 u->use_mmap = use_mmap;
1556 u->use_tsched = use_tsched;
1557 u->first = TRUE;
1558 u->rtpoll = pa_rtpoll_new();
1559 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1560
1561 u->smoother = pa_smoother_new(
1562 DEFAULT_TSCHED_BUFFER_USEC*2,
1563 DEFAULT_TSCHED_BUFFER_USEC*2,
1564 TRUE,
1565 TRUE,
1566 5,
1567 pa_rtclock_usec(),
1568 TRUE);
1569
1570 if (reserve_init(u, pa_modargs_get_value(
1571 ma, "device_id",
1572 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE))) < 0)
1573 goto fail;
1574
1575 b = use_mmap;
1576 d = use_tsched;
1577
1578 if (profile) {
1579
1580 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1581 pa_log("device_id= not set");
1582 goto fail;
1583 }
1584
1585 if (!(u->pcm_handle = pa_alsa_open_by_device_id_profile(
1586 dev_id,
1587 &u->device_name,
1588 &ss, &map,
1589 SND_PCM_STREAM_PLAYBACK,
1590 &nfrags, &period_frames, tsched_frames,
1591 &b, &d, profile)))
1592
1593 goto fail;
1594
1595 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1596
1597 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1598 dev_id,
1599 &u->device_name,
1600 &ss, &map,
1601 SND_PCM_STREAM_PLAYBACK,
1602 &nfrags, &period_frames, tsched_frames,
1603 &b, &d, &profile)))
1604
1605 goto fail;
1606
1607 } else {
1608
1609 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1610 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1611 &u->device_name,
1612 &ss, &map,
1613 SND_PCM_STREAM_PLAYBACK,
1614 &nfrags, &period_frames, tsched_frames,
1615 &b, &d, FALSE)))
1616 goto fail;
1617
1618 }
1619
1620 pa_assert(u->device_name);
1621 pa_log_info("Successfully opened device %s.", u->device_name);
1622
1623 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1624 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1625 goto fail;
1626 }
1627
1628 if (profile)
1629 pa_log_info("Selected configuration '%s' (%s).", profile->description, profile->name);
1630
1631 if (use_mmap && !b) {
1632 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1633 u->use_mmap = use_mmap = FALSE;
1634 }
1635
1636 if (use_tsched && (!b || !d)) {
1637 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1638 u->use_tsched = use_tsched = FALSE;
1639 }
1640
1641 if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
1642 pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
1643 u->use_tsched = use_tsched = FALSE;
1644 }
1645
1646 if (u->use_mmap)
1647 pa_log_info("Successfully enabled mmap() mode.");
1648
1649 if (u->use_tsched)
1650 pa_log_info("Successfully enabled timer-based scheduling mode.");
1651
1652 /* ALSA might tweak the sample spec, so recalculate the frame size */
1653 frame_size = pa_frame_size(&ss);
1654
1655 pa_alsa_find_mixer_and_elem(u->pcm_handle, &u->mixer_handle, &u->mixer_elem, pa_modargs_get_value(ma, "control", NULL));
1656
1657 pa_sink_new_data_init(&data);
1658 data.driver = driver;
1659 data.module = m;
1660 data.card = card;
1661 set_sink_name(&data, ma, dev_id, u->device_name);
1662 pa_sink_new_data_set_sample_spec(&data, &ss);
1663 pa_sink_new_data_set_channel_map(&data, &map);
1664
1665 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1666 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1667 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1668 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1669 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1670
1671 if (profile) {
1672 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, profile->name);
1673 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, profile->description);
1674 }
1675
1676 pa_alsa_init_description(data.proplist);
1677
1678 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
1679 pa_sink_new_data_done(&data);
1680
1681 if (!u->sink) {
1682 pa_log("Failed to create sink object");
1683 goto fail;
1684 }
1685
1686 u->sink->parent.process_msg = sink_process_msg;
1687 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1688 u->sink->set_state = sink_set_state_cb;
1689 u->sink->userdata = u;
1690
1691 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1692 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1693
1694 u->frame_size = frame_size;
1695 u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
1696 u->nfragments = nfrags;
1697 u->hwbuf_size = u->fragment_size * nfrags;
1698 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
1699 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
1700
1701 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1702 nfrags, (long unsigned) u->fragment_size,
1703 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1704
1705 pa_sink_set_max_request(u->sink, u->hwbuf_size);
1706
1707 if (u->use_tsched) {
1708 fix_min_sleep_wakeup(u);
1709 fix_tsched_watermark(u);
1710
1711 u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->sink->sample_spec);
1712
1713 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
1714
1715 pa_sink_set_latency_range(u->sink,
1716 0,
1717 pa_bytes_to_usec(u->hwbuf_size, &ss));
1718
1719 pa_log_info("Time scheduling watermark is %0.2fms",
1720 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1721 }
1722
1723 reserve_update(u);
1724
1725 if (update_sw_params(u) < 0)
1726 goto fail;
1727
1728 if (setup_mixer(u, ignore_dB) < 0)
1729 goto fail;
1730
1731 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1732
1733 if (!(u->thread = pa_thread_new(thread_func, u))) {
1734 pa_log("Failed to create thread.");
1735 goto fail;
1736 }
1737
1738 /* Get initial mixer settings */
1739 if (data.volume_is_set) {
1740 if (u->sink->set_volume)
1741 u->sink->set_volume(u->sink);
1742 } else {
1743 if (u->sink->get_volume)
1744 u->sink->get_volume(u->sink);
1745 }
1746
1747 if (data.muted_is_set) {
1748 if (u->sink->set_mute)
1749 u->sink->set_mute(u->sink);
1750 } else {
1751 if (u->sink->get_mute)
1752 u->sink->get_mute(u->sink);
1753 }
1754
1755 pa_sink_put(u->sink);
1756
1757 return u->sink;
1758
1759 fail:
1760
1761 userdata_free(u);
1762
1763 return NULL;
1764 }
1765
/* Tear down all resources referenced by *u in reverse order of
 * acquisition and free the struct itself. Safe on partially
 * initialized userdata: every member is checked before release. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Detach the sink from the core first so no new work arrives. */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Ask the I/O thread to shut down and join it. */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    /* Stop playback immediately (drop, not drain) before closing. */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->smoother)
        pa_smoother_free(u->smoother);

    /* Release the device reservation last. */
    reserve_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u);
}
1810
1811 void pa_alsa_sink_free(pa_sink *s) {
1812 struct userdata *u;
1813
1814 pa_sink_assert_ref(s);
1815 pa_assert_se(u = s->userdata);
1816
1817 userdata_free(u);
1818 }