/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <math.h> /* for round() in the volume conversions below; missing from the extracted include list, assumed needed */

#include <asoundlib.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/xmalloc.h>
#include <pulse/util.h>
#include <pulse/timeval.h>
#include <pulse/i18n.h>

#include <pulsecore/core.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/core-error.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/rtclock.h>
#include <pulsecore/time-smoother.h>

#include <modules/reserve-wrap.h>

#include "alsa-util.h"
#include "alsa-sink.h"

/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)      /* 2s */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)         /* 10ms */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)         /* 4ms */
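
/* A note on timer-based scheduling (added commentary, not from the
 * original source): in tsched mode this sink asks ALSA for a large
 * (~2 s) hardware buffer and wakes up from rtpoll timers shortly before
 * the fill level drops below the watermark, rather than on every period
 * interrupt. The four constants above bound that scheme: how much total
 * buffer to request, where the refill watermark starts, and the minimum
 * sleep/wakeup margins the watermark may be clamped to. */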

struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;

    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    snd_mixer_elem_t *mixer_elem;
    long hw_volume_max, hw_volume_min;
    long hw_dB_max, hw_dB_min;
    pa_bool_t hw_dB_supported:1;
    pa_bool_t mixer_seperate_channels:1;
    pa_cvolume hardware_volume;

    size_t frame_size, fragment_size, hwbuf_size, tsched_watermark, hwbuf_unused, min_sleep, min_wakeup;
    unsigned nfragments;
    pa_memchunk memchunk;

    char *device_name;

    pa_bool_t use_mmap:1, use_tsched:1;

    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    pa_smoother *smoother;
    uint64_t write_count;
    uint64_t since_start;

    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
};

static void userdata_free(struct userdata *u);

static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
    pa_assert(r);
    pa_assert(u);

    if (pa_sink_suspend(u->sink, TRUE) < 0)
        return PA_HOOK_CANCEL;

    return PA_HOOK_OK;
}

static void reserve_done(struct userdata *u) {
    pa_assert(u);

    if (u->reserve_slot) {
        pa_hook_slot_free(u->reserve_slot);
        u->reserve_slot = NULL;
    }

    if (u->reserve) {
        pa_reserve_wrapper_unref(u->reserve);
        u->reserve = NULL;
    }
}

static void reserve_update(struct userdata *u) {
    const char *description;
    pa_assert(u);

    if (!u->sink)
        return;

    if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
        pa_reserve_wrapper_set_application_device_name(u->reserve, description);
}

static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    /* We are resuming, try to lock the device */
    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}

static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);

    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}

static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;
}
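
/* Worked example for the two fixups above (illustration only): at
 * 44100 Hz S16LE stereo the 2 s default buffer is 352800 bytes, the 20 ms
 * watermark 3528 bytes, min_sleep (10 ms) 1764 bytes and min_wakeup (4 ms)
 * about 705 bytes. With nothing requested, max_use equals the whole buffer
 * and the clamps change nothing; they only bite once a client requests a
 * small latency, which grows hwbuf_unused and shrinks max_use. */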

static void adjust_after_underrun(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark *= 2;
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_notice("Increasing wakeup watermark to %0.2f ms",
                      (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_notice("Increasing minimal latency to %0.2f ms",
                      (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_update_latency_range(u->sink, new_min_latency, u->sink->thread_info.max_latency);
        return;
    }

    /* When we reach this we're officially fucked! */
}

static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
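
/* Worked example for hw_sleep_time() (illustration only): with no client
 * latency request, usec is the full 2 s buffer and wm the 20 ms watermark,
 * so we plan to sleep 1.98 s and keep 20 ms as processing headroom. For a
 * 30 ms latency request we get sleep = 10 ms, process = 20 ms; below 20 ms
 * the watermark is first halved to usec/2. */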

static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, snd_strerror(err));

    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, snd_strerror(err));
        return -1;
    }

    u->first = TRUE;
    u->since_start = 0;
    return 0;
}

static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
    size_t left_to_play;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size) {
        left_to_play = u->hwbuf_size - n_bytes;

#ifdef DEBUG_TIMING
        pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    } else {
        left_to_play = 0;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (!u->first && !u->after_rewind) {

            if (pa_log_ratelimit())
                pa_log_info("Underrun!");

            if (u->use_tsched)
                adjust_after_underrun(u);
        }
    }

    return left_to_play;
}

static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    pa_bool_t work_done = FALSE; /* was TRUE; start pessimistic like unix_write() below, so an iteration that writes nothing doesn't report work done */
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }


        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes", (unsigned long) (frames * u->frame_size));
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done ? 1 : 0;
}

static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            pa_assert(frames != 0);

            if (PA_UNLIKELY(frames < 0)) {

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done ? 1 : 0;
}

static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", snd_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", snd_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_usec();

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);
}
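
/* Added commentary (not from the original source): each pa_smoother_put()
 * pairs a system timestamp (now1) with how much of the stream has actually
 * been played (now2 = bytes written minus what still sits in the hardware
 * buffer, converted to time). E.g. after writing 1 s of audio with 150 ms
 * still queued, the pair is (now1, 850 ms); the smoother interpolates
 * between such pairs when asked for the stream position between updates. */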

static pa_usec_t sink_get_latency(struct userdata *u) {
    pa_usec_t r;
    int64_t delay;
    pa_usec_t now1, now2;

    pa_assert(u);

    now1 = pa_rtclock_usec();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;

    r = delay >= 0 ? (pa_usec_t) delay : 0;

    if (u->memchunk.memblock)
        r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);

    return r;
}
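
/* Worked example (illustration only): if 1.000 s of audio has been written
 * and the smoother estimates 0.850 s as played by now, the reported
 * latency is 150 ms, plus whatever part of u->memchunk was rendered but
 * not yet handed to ALSA. */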

static int build_pollfd(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
        return -1;

    return 0;
}

/* Called from IO context */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_usec());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}

/* Called from IO context */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", snd_strerror(err));
        return err;
    }

    pa_sink_set_max_request(u->sink, u->hwbuf_size - u->hwbuf_unused);

    return 0;
}
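
/* Worked example for avail_min (illustration only): a client requesting
 * 100 ms out of a 2 s buffer leaves hwbuf_unused covering the other 1.9 s,
 * so avail_min starts one frame above that. With tsched the sleep time
 * from hw_sleep_time() (80 ms here, the request minus the 20 ms watermark)
 * is added on top, so POLLOUT fires roughly when the fill level reaches
 * the watermark, i.e. when we would want to wake up anyway. */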

/* Called from IO context */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    snd_config_update_free_global();
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}

/* Called from IO context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}

/* Called from main context */
static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
    pa_sink_state_t old_state;
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    old_state = pa_sink_get_state(u->sink);

    if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
        reserve_done(u);
    else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
        if (reserve_init(u, u->device_name) < 0)
            return -1;

    return 0;
}

static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_sink_get_volume(u->sink, TRUE);
        pa_sink_get_mute(u->sink, TRUE);
    }

    return 0;
}

static pa_volume_t from_alsa_volume(struct userdata *u, long alsa_vol) {

    return (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) /
                               (double) (u->hw_volume_max - u->hw_volume_min));
}

static long to_alsa_volume(struct userdata *u, pa_volume_t vol) {
    long alsa_vol;

    alsa_vol = (long) round(((double) vol * (double) (u->hw_volume_max - u->hw_volume_min))
                            / PA_VOLUME_NORM) + u->hw_volume_min;

    return PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);
}
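
/* Worked example (illustration only): for a mixer with hw_volume_min = 0
 * and hw_volume_max = 64, PA_VOLUME_NORM (100%) maps to the ALSA value 64,
 * 50% maps to round(0.5 * 64) = 32, and from_alsa_volume(32) maps back to
 * 50%. This linear mapping is only used when the mixer exposes no usable
 * dB information; otherwise the dB branches in the volume callbacks below
 * are taken. */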

static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;

            if (u->hw_dB_supported) {

                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);
            } else {

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        long alsa_vol;

        if (u->hw_dB_supported) {

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    if (!pa_cvolume_equal(&u->hardware_volume, &r)) {

        s->virtual_volume = u->hardware_volume = r;

        if (u->hw_dB_supported) {
            pa_cvolume reset;

            /* Hmm, so the hardware volume changed, let's reset our software volume */
            pa_cvolume_reset(&reset, s->sample_spec.channels);
            pa_sink_set_soft_volume(s, &reset);
        }
    }

    return;

fail:
    pa_log_error("Unable to read volume: %s", snd_strerror(err));
}

static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;
            pa_volume_t vol;

            vol = s->virtual_volume.values[i];

            if (u->hw_dB_supported) {

                alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
                alsa_vol += u->hw_dB_max;
                alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

                if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, 1)) < 0)
                    goto fail;

                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);

            } else {
                alsa_vol = to_alsa_volume(u, vol);

                if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
                    goto fail;

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        pa_volume_t vol;
        long alsa_vol;

        vol = pa_cvolume_max(&s->virtual_volume);

        if (u->hw_dB_supported) {
            alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
            alsa_vol += u->hw_dB_max;
            alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

            if ((err = snd_mixer_selem_set_playback_dB_all(u->mixer_elem, alsa_vol, 1)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {
            alsa_vol = to_alsa_volume(u, vol);

            if ((err = snd_mixer_selem_set_playback_volume_all(u->mixer_elem, alsa_vol)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    u->hardware_volume = r;

    if (u->hw_dB_supported) {
        char t[PA_CVOLUME_SNPRINT_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&s->soft_volume, &s->virtual_volume, &u->hardware_volume);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->virtual_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->soft_volume));

    } else

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->virtual_volume = r;

    return;

fail:
    pa_log_error("Unable to set volume: %s", snd_strerror(err));
}

static void sink_get_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err, sw;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
        pa_log_error("Unable to get switch: %s", snd_strerror(err));
        return;
    }

    s->muted = !sw;
}

static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
        pa_log_error("Unable to set switch: %s", snd_strerror(err));
        return;
    }
}

static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    size_t before;
    pa_assert(u);

    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
       buffer than before. If so, we need to make sure that subsequent
       rewinds are relative to the new maximum fill level and not to the
       current fill level. Thus, let's do a full rewind once, to clear
       things up. */

    if (u->hwbuf_unused > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}

static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;
    u->sink->thread_info.rewind_nbytes = 0;

    if (rewind_nbytes <= 0)
        goto finish;

    pa_assert(rewind_nbytes > 0);
    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    snd_pcm_hwsync(u->pcm_handle);
    if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
        pa_log("snd_pcm_avail_update() failed: %s", snd_strerror((int) unused));
        return -1;
    }

    unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", snd_strerror((int) out_frames));
            return -1;
        }
        pa_log_debug("after: %lu", (unsigned long) out_frames);

        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= out_frames * u->frame_size;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

finish:

    pa_sink_process_rewind(u->sink, 0);

    return 0;
}
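
/* Worked example for the rewind limit (illustration only): with a 2 s
 * buffer, a 20 ms watermark and 0.5 s reported free by
 * snd_pcm_avail_update(), unused_nbytes covers 0.52 s and limit_nbytes
 * permits rewinding at most 1.48 s. The watermark's worth of audio
 * closest to the playback pointer is never rewound, as a safety margin
 * against an immediate underrun. */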

static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            if (u->sink->thread_info.rewind_requested)
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec());
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely. */

                    pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
                goto fail;
            }

            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}

static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name) {
    const char *n;
    char *t;

    pa_assert(data);
    pa_assert(ma);
    pa_assert(device_name);

    if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
        pa_sink_new_data_set_name(data, n);
        data->namereg_fail = TRUE;
        return;
    }

    if ((n = pa_modargs_get_value(ma, "name", NULL)))
        data->namereg_fail = TRUE;
    else {
        n = device_id ? device_id : device_name;
        data->namereg_fail = FALSE;
    }

    t = pa_sprintf_malloc("alsa_output.%s", n);
    pa_sink_new_data_set_name(data, t);
    pa_xfree(t);
}

static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    if (!u->mixer_handle)
        return 0;

    pa_assert(u->mixer_elem);

    if (snd_mixer_selem_has_playback_volume(u->mixer_elem)) {
        pa_bool_t suitable = FALSE;

        if (snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) < 0)
            pa_log_info("Failed to get volume range. Falling back to software volume control.");
        else if (u->hw_volume_min >= u->hw_volume_max)
            pa_log_warn("Your kernel driver is broken: it reports a volume range from %li to %li which makes no sense.", u->hw_volume_min, u->hw_volume_max);
        else {
            pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
            suitable = TRUE;
        }

        if (suitable) {
            if (ignore_dB || snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) < 0)
                pa_log_info("Mixer doesn't support dB information or data is ignored.");
            else {
#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_min, sizeof(u->hw_dB_min));
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_max, sizeof(u->hw_dB_max));
#endif

                if (u->hw_dB_min >= u->hw_dB_max)
                    pa_log_warn("Your kernel driver is broken: it reports a volume range from %0.2f dB to %0.2f dB which makes no sense.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
                else {
                    pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
                    u->hw_dB_supported = TRUE;

                    if (u->hw_dB_max > 0) {
                        u->sink->base_volume = pa_sw_volume_from_dB(- (double) u->hw_dB_max/100.0);
                        pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
                    } else
                        pa_log_info("No particular base volume set, fixing to 0 dB");
                }
            }

            if (!u->hw_dB_supported &&
                u->hw_volume_max - u->hw_volume_min < 3) {

                pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
                suitable = FALSE;
            }
        }

        if (suitable) {
            u->mixer_seperate_channels = pa_alsa_calc_mixer_map(u->mixer_elem, &u->sink->channel_map, u->mixer_map, TRUE) >= 0;

            u->sink->get_volume = sink_get_volume_cb;
            u->sink->set_volume = sink_set_volume_cb;
            u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
            pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");

            if (!u->hw_dB_supported)
                u->sink->n_volume_steps = u->hw_volume_max - u->hw_volume_min + 1;
        } else
            pa_log_info("Using software volume control.");
    }

    if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
    } else
        pa_log_info("Using software mute control.");

    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
    snd_mixer_elem_set_callback_private(u->mixer_elem, u);

    return 0;
}
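
/* Added commentary (not from the original source): the base volume
 * computed above marks where 0 dB hardware amplification sits. E.g. a
 * mixer whose range tops out at +4 dB (hw_dB_max = 400) gets
 * base_volume = pa_sw_volume_from_dB(-4.0), since 100% volume is mapped
 * to the mixer's +4 dB ceiling in sink_set_volume_cb(). */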

pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, const pa_alsa_profile_info *profile) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_usec_t usec;
    pa_sink_new_data data;

    pa_assert(m);
    pa_assert(ma);

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE, 5);
    usec = pa_rtclock_usec();
    pa_smoother_set_time_offset(u->smoother, usec);
    pa_smoother_pause(u->smoother, usec);

    if (reserve_init(u, pa_modargs_get_value(
                             ma, "device_id",
                             pa_modargs_get_value(ma, "device", DEFAULT_DEVICE))) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

    if (profile) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_profile(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, profile)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, &profile)))

            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;

    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (profile)
        pa_log_info("Selected configuration '%s' (%s).", profile->description, profile->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    pa_alsa_find_mixer_and_elem(u->pcm_handle, &u->mixer_handle, &u->mixer_elem);

    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name);
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (profile) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, profile->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, profile->description);
    }

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    u->tsched_watermark = tsched_watermark;
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    if (use_tsched) {
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
    u->sink->thread_info.max_request = u->hwbuf_size;

    pa_sink_set_latency_range(u->sink,
                              !use_tsched ? pa_bytes_to_usec(u->hwbuf_size, &ss) : (pa_usec_t) -1,
                              pa_bytes_to_usec(u->hwbuf_size, &ss));

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    if (use_tsched)
        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    return u->sink;

fail:

    userdata_free(u);

    return NULL;
}

static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u);
}

void pa_alsa_sink_free(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    userdata_free(u);
}