/* code.delx.au - pulseaudio - src/modules/alsa/alsa-sink.c
 * Commit: in case ALSA lies to us, don't spin forever */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
38 #include <pulse/i18n.h>
39
40 #include <pulsecore/core.h>
41 #include <pulsecore/module.h>
42 #include <pulsecore/memchunk.h>
43 #include <pulsecore/sink.h>
44 #include <pulsecore/modargs.h>
45 #include <pulsecore/core-util.h>
46 #include <pulsecore/sample-util.h>
47 #include <pulsecore/log.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/thread.h>
50 #include <pulsecore/core-error.h>
51 #include <pulsecore/thread-mq.h>
52 #include <pulsecore/rtpoll.h>
53 #include <pulsecore/rtclock.h>
54 #include <pulsecore/time-smoother.h>
55
56 #include "alsa-util.h"
57 #include "alsa-sink.h"
58
59 /* #define DEBUG_TIMING */
60
61 #define DEFAULT_DEVICE "default"
62 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
63 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
64 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
65 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
66
/* Per-sink-instance state, shared between the main thread and the
 * realtime I/O thread. */
struct userdata {
    pa_core *core;                       /* owning core */
    pa_module *module;                   /* owning module instance */
    pa_sink *sink;                       /* the sink we implement */

    pa_thread *thread;                   /* realtime I/O thread */
    pa_thread_mq thread_mq;              /* message queue to/from the main thread */
    pa_rtpoll *rtpoll;                   /* poll loop run by the I/O thread */

    snd_pcm_t *pcm_handle;               /* open PCM device; NULL while suspended */

    pa_alsa_fdlist *mixer_fdl;           /* mixer fds watched on the main loop */
    snd_mixer_t *mixer_handle;
    snd_mixer_elem_t *mixer_elem;        /* simple mixer element we control */
    long hw_volume_max, hw_volume_min;   /* raw mixer volume range */
    long hw_dB_max, hw_dB_min;           /* mixer range in 1/100 dB, valid if hw_dB_supported */
    pa_bool_t hw_dB_supported:1;         /* mixer exposes a dB scale */
    pa_bool_t mixer_seperate_channels:1; /* per-channel mixer control (field name typo is historical) */
    pa_cvolume hardware_volume;          /* last volume read from/written to the hardware */

    /* All of these are byte counts unless noted otherwise */
    size_t frame_size, fragment_size, hwbuf_size, tsched_watermark, hwbuf_unused, min_sleep, min_wakeup;
    unsigned nfragments;
    pa_memchunk memchunk;                /* partially-written chunk kept between unix_write() calls */

    char *device_name;                   /* ALSA device string, used for reopen on resume */

    pa_bool_t use_mmap:1, use_tsched:1;  /* mmap transfer / timer-based scheduling enabled */

    pa_bool_t first, after_rewind;       /* suppress underrun handling right after start / rewind */

    pa_rtpoll_item *alsa_rtpoll_item;    /* PCM poll fds registered with rtpoll */

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST]; /* sink channel -> mixer channel */

    pa_smoother *smoother;               /* time smoother for latency interpolation */
    uint64_t write_count;                /* total bytes handed to ALSA (decremented on rewind) */
    uint64_t since_start;                /* bytes written since last (re)start/recover */
};
105
106 static void userdata_free(struct userdata *u);
107
108 static void fix_min_sleep_wakeup(struct userdata *u) {
109 size_t max_use, max_use_2;
110
111 pa_assert(u);
112
113 max_use = u->hwbuf_size - u->hwbuf_unused;
114 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
115
116 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
117 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
118
119 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
120 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
121 }
122
123 static void fix_tsched_watermark(struct userdata *u) {
124 size_t max_use;
125 pa_assert(u);
126
127 max_use = u->hwbuf_size - u->hwbuf_unused;
128
129 if (u->tsched_watermark > max_use - u->min_sleep)
130 u->tsched_watermark = max_use - u->min_sleep;
131
132 if (u->tsched_watermark < u->min_wakeup)
133 u->tsched_watermark = u->min_wakeup;
134 }
135
136 static void adjust_after_underrun(struct userdata *u) {
137 size_t old_watermark;
138 pa_usec_t old_min_latency, new_min_latency;
139
140 pa_assert(u);
141
142 /* First, just try to increase the watermark */
143 old_watermark = u->tsched_watermark;
144 u->tsched_watermark *= 2;
145 fix_tsched_watermark(u);
146
147 if (old_watermark != u->tsched_watermark) {
148 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
149 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
150 return;
151 }
152
153 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
154 old_min_latency = u->sink->thread_info.min_latency;
155 new_min_latency = PA_MIN(old_min_latency * 2, u->sink->thread_info.max_latency);
156
157 if (old_min_latency != new_min_latency) {
158 pa_log_notice("Increasing minimal latency to %0.2f ms",
159 (double) new_min_latency / PA_USEC_PER_MSEC);
160
161 pa_sink_update_latency_range(u->sink, new_min_latency, u->sink->thread_info.max_latency);
162 return;
163 }
164
165 /* When we reach this we're officialy fucked! */
166 }
167
168 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
169 pa_usec_t usec, wm;
170
171 pa_assert(sleep_usec);
172 pa_assert(process_usec);
173
174 pa_assert(u);
175
176 usec = pa_sink_get_requested_latency_within_thread(u->sink);
177
178 if (usec == (pa_usec_t) -1)
179 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
180
181 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
182
183 if (wm > usec)
184 wm = usec/2;
185
186 *sleep_usec = usec - wm;
187 *process_usec = wm;
188
189 #ifdef DEBUG_TIMING
190 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
191 (unsigned long) (usec / PA_USEC_PER_MSEC),
192 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
193 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
194 #endif
195 }
196
197 static int try_recover(struct userdata *u, const char *call, int err) {
198 pa_assert(u);
199 pa_assert(call);
200 pa_assert(err < 0);
201
202 pa_log_debug("%s: %s", call, snd_strerror(err));
203
204 pa_assert(err != -EAGAIN);
205
206 if (err == -EPIPE)
207 pa_log_debug("%s: Buffer underrun!", call);
208
209 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
210 pa_log("%s: %s", call, snd_strerror(err));
211 return -1;
212 }
213
214 u->first = TRUE;
215 u->since_start = 0;
216 return 0;
217 }
218
219 static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
220 size_t left_to_play;
221
222 /* We use <= instead of < for this check here because an underrun
223 * only happens after the last sample was processed, not already when
224 * it is removed from the buffer. This is particularly important
225 * when block transfer is used. */
226
227 if (n_bytes <= u->hwbuf_size) {
228 left_to_play = u->hwbuf_size - n_bytes;
229
230 #ifdef DEBUG_TIMING
231 pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
232 #endif
233
234 } else {
235 left_to_play = 0;
236
237 #ifdef DEBUG_TIMING
238 PA_DEBUG_TRAP;
239 #endif
240
241 if (!u->first && !u->after_rewind) {
242
243 if (pa_log_ratelimit())
244 pa_log_info("Underrun!");
245
246 if (u->use_tsched)
247 adjust_after_underrun(u);
248 }
249 }
250
251 return left_to_play;
252 }
253
254 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
255 pa_bool_t work_done = TRUE;
256 pa_usec_t max_sleep_usec = 0, process_usec = 0;
257 size_t left_to_play;
258 unsigned j = 0;
259
260 pa_assert(u);
261 pa_sink_assert_ref(u->sink);
262
263 if (u->use_tsched)
264 hw_sleep_time(u, &max_sleep_usec, &process_usec);
265
266 for (;;) {
267 snd_pcm_sframes_t n;
268 size_t n_bytes;
269 int r;
270
271 /* First we determine how many samples are missing to fill the
272 * buffer up to 100% */
273
274 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
275
276 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
277 continue;
278
279 return r;
280 }
281
282 n_bytes = (size_t) n * u->frame_size;
283
284 #ifdef DEBUG_TIMING
285 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
286 #endif
287
288 left_to_play = check_left_to_play(u, n_bytes);
289
290 if (u->use_tsched)
291
292 /* We won't fill up the playback buffer before at least
293 * half the sleep time is over because otherwise we might
294 * ask for more data from the clients then they expect. We
295 * need to guarantee that clients only have to keep around
296 * a single hw buffer length. */
297
298 if (!polled &&
299 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
300 #ifdef DEBUG_TIMING
301 pa_log_debug("Not filling up, because too early.");
302 #endif
303 break;
304 }
305
306 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
307
308 if (polled)
309 PA_ONCE_BEGIN {
310 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
311 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
312 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
313 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
314 pa_strnull(dn));
315 pa_xfree(dn);
316 } PA_ONCE_END;
317
318 #ifdef DEBUG_TIMING
319 pa_log_debug("Not filling up, because not necessary.");
320 #endif
321 break;
322 }
323
324
325 if (++j > 10) {
326 #ifdef DEBUG_TIMING
327 pa_log_debug("Not filling up, because already too many iterations.");
328 #endif
329
330 break;
331 }
332
333 n_bytes -= u->hwbuf_unused;
334 polled = FALSE;
335
336 #ifdef DEBUG_TIMING
337 pa_log_debug("Filling up");
338 #endif
339
340 for (;;) {
341 pa_memchunk chunk;
342 void *p;
343 int err;
344 const snd_pcm_channel_area_t *areas;
345 snd_pcm_uframes_t offset, frames;
346 snd_pcm_sframes_t sframes;
347
348 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
349 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
350
351 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
352
353 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
354 continue;
355
356 return r;
357 }
358
359 /* Make sure that if these memblocks need to be copied they will fit into one slot */
360 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
361 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
362
363 /* Check these are multiples of 8 bit */
364 pa_assert((areas[0].first & 7) == 0);
365 pa_assert((areas[0].step & 7)== 0);
366
367 /* We assume a single interleaved memory buffer */
368 pa_assert((areas[0].first >> 3) == 0);
369 pa_assert((areas[0].step >> 3) == u->frame_size);
370
371 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
372
373 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
374 chunk.length = pa_memblock_get_length(chunk.memblock);
375 chunk.index = 0;
376
377 pa_sink_render_into_full(u->sink, &chunk);
378 pa_memblock_unref_fixed(chunk.memblock);
379
380 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
381
382 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
383 continue;
384
385 return r;
386 }
387
388 work_done = TRUE;
389
390 u->write_count += frames * u->frame_size;
391 u->since_start += frames * u->frame_size;
392
393 #ifdef DEBUG_TIMING
394 pa_log_debug("Wrote %lu bytes", (unsigned long) (frames * u->frame_size));
395 #endif
396
397 if ((size_t) frames * u->frame_size >= n_bytes)
398 break;
399
400 n_bytes -= (size_t) frames * u->frame_size;
401 }
402 }
403
404 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
405 return work_done ? 1 : 0;
406 }
407
/* Transfer rendered audio into the ALSA hw buffer using snd_pcm_writei()
 * (non-mmap fallback path). Returns 1 if any audio was written, 0 if
 * not, negative on unrecoverable error; *sleep_usec receives how long
 * the caller may sleep. 'polled' means poll() signalled POLLOUT. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;  /* outer-loop iteration guard */

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;

        /* How many bytes of free space are in the hw buffer? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* Woken by POLLOUT although nothing is writable: driver bug;
             * complain once */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* In case ALSA lies to us, don't spin in this loop forever */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Render a fresh chunk only when the previous one was fully
             * consumed */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            /* Never write more than the hw buffer can take right now */
            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            /* NOTE(review): assumes blocking-mode writei never returns
             * 0 -- confirm against the PCM open flags */
            pa_assert(frames != 0);

            if (PA_UNLIKELY(frames < 0)) {

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            /* Advance the chunk past what was consumed */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done ? 1 : 0;
}
529
/* Feed the time smoother with a fresh (system time, playback time)
 * sample pair, derived from the current ALSA delay and -- when the
 * driver provides one -- its own high-resolution timestamp. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", snd_strerror(err));
        return;
    }

    /* Prefer the driver's own timestamp; fall back to rtclock below */
    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", snd_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Playback position = total bytes written minus what is still queued */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    /* Guard against going negative (delay can exceed write_count) */
    if (PA_UNLIKELY(position < 0))
        position = 0;

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_usec();

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);
}
570
571 static pa_usec_t sink_get_latency(struct userdata *u) {
572 pa_usec_t r;
573 int64_t delay;
574 pa_usec_t now1, now2;
575
576 pa_assert(u);
577
578 now1 = pa_rtclock_usec();
579 now2 = pa_smoother_get(u->smoother, now1);
580
581 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
582
583 r = delay >= 0 ? (pa_usec_t) delay : 0;
584
585 if (u->memchunk.memblock)
586 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
587
588 return r;
589 }
590
591 static int build_pollfd(struct userdata *u) {
592 pa_assert(u);
593 pa_assert(u->pcm_handle);
594
595 if (u->alsa_rtpoll_item)
596 pa_rtpoll_item_free(u->alsa_rtpoll_item);
597
598 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
599 return -1;
600
601 return 0;
602 }
603
604 static int suspend(struct userdata *u) {
605 pa_assert(u);
606 pa_assert(u->pcm_handle);
607
608 pa_smoother_pause(u->smoother, pa_rtclock_usec());
609
610 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
611 * take awfully long with our long buffer sizes today. */
612 snd_pcm_close(u->pcm_handle);
613 u->pcm_handle = NULL;
614
615 if (u->alsa_rtpoll_item) {
616 pa_rtpoll_item_free(u->alsa_rtpoll_item);
617 u->alsa_rtpoll_item = NULL;
618 }
619
620 pa_log_info("Device suspended...");
621
622 return 0;
623 }
624
/* Recompute and apply the ALSA software parameters (hwbuf_unused,
 * avail_min, max request size) from the currently requested latency.
 * Returns 0 on success, a negative ALSA error code on failure. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if noone asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        /* Re-derive the sleep/wakeup minima and re-clamp the watermark
         * against the new usable buffer size */
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at last one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* Delay poll() wakeups until roughly the point we would wake up
         * by timer anyway */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", snd_strerror(err));
        return err;
    }

    /* Clients never need to provide more than the usable buffer size */
    pa_sink_set_max_request(u->sink, u->hwbuf_size - u->hwbuf_unused);

    return 0;
}
679
/* Resume from suspend: reopen the PCM device and verify that the exact
 * hardware configuration we had before (sample spec, access mode,
 * fragment layout) can be restored. Returns 0 on success, -1 on any
 * failure (leaving pcm_handle closed). */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Re-read the ALSA configuration in case it changed while suspended */
    snd_config_update_free_global();
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
        goto fail;
    }

    /* Request exactly the configuration we had before suspending */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
        goto fail;
    }

    /* Any deviation from the previous configuration means the sink's
     * advertised properties would be wrong -- treat as failure */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Restart stream bookkeeping from scratch */
    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
751
/* Message handler run in the I/O thread: answers latency queries and
 * performs the open/close work for sink state transitions, then hands
 * everything on to the generic pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no device, so report 0 latency */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    /* Only an opened sink can be suspended */
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First activation: just hook up the poll fds */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* Coming out of suspend: reopen the device */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    /* Nothing device-specific to do for these states */
                    ;
            }

            break;
    }

    /* Delegate the generic handling (and any unhandled codes) */
    return pa_sink_process_msg(o, code, data, offset, chunk);
}
806
807 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
808 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
809
810 pa_assert(u);
811 pa_assert(u->mixer_handle);
812
813 if (mask == SND_CTL_EVENT_MASK_REMOVE)
814 return 0;
815
816 if (mask & SND_CTL_EVENT_MASK_VALUE) {
817 pa_sink_get_volume(u->sink, TRUE);
818 pa_sink_get_mute(u->sink, TRUE);
819 }
820
821 return 0;
822 }
823
824 static pa_volume_t from_alsa_volume(struct userdata *u, long alsa_vol) {
825
826 return (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) /
827 (double) (u->hw_volume_max - u->hw_volume_min));
828 }
829
830 static long to_alsa_volume(struct userdata *u, pa_volume_t vol) {
831 long alsa_vol;
832
833 alsa_vol = (long) round(((double) vol * (double) (u->hw_volume_max - u->hw_volume_min))
834 / PA_VOLUME_NORM) + u->hw_volume_min;
835
836 return PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);
837 }
838
/* Sink callback: read the current hardware volume from the mixer into
 * s->virtual_volume, using the dB scale when the element supports it
 * and per-channel reads when the channels map individually. */
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        /* Read every channel through the sink->mixer channel map */
        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;

            if (u->hw_dB_supported) {

                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                /* ALSA dB values are in 1/100 dB, relative to hw_dB_max */
                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);
            } else {

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        /* Single mixer channel controls all sink channels */
        long alsa_vol;

        if (u->hw_dB_supported) {

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    if (!pa_cvolume_equal(&u->hardware_volume, &r)) {

        s->virtual_volume = u->hardware_volume = r;

        if (u->hw_dB_supported) {
            pa_cvolume reset;

            /* Hmm, so the hardware volume changed, let's reset our software volume */
            pa_cvolume_reset(&reset, s->sample_spec.channels);
            pa_sink_set_soft_volume(s, &reset);
        }
    }

    return;

fail:
    pa_log_error("Unable to read volume: %s", snd_strerror(err));
}
918
/* Sink callback: write s->virtual_volume to the hardware mixer, read
 * back what the hardware actually accepted (its resolution is coarser
 * than ours), and -- when a dB scale is available -- compensate the
 * difference in software. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;  /* volume the hardware actually ended up at */

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;
            pa_volume_t vol;

            vol = s->virtual_volume.values[i];

            if (u->hw_dB_supported) {

                /* Convert to ALSA's 1/100 dB units relative to hw_dB_max */
                alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
                alsa_vol += u->hw_dB_max;
                alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

                if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, 1)) < 0)
                    goto fail;

                /* Read back: the hw may have rounded to its own steps */
                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);

            } else {
                alsa_vol = to_alsa_volume(u, vol);

                if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
                    goto fail;

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        /* One mixer channel for all sink channels: drive it with the
         * loudest requested channel */
        pa_volume_t vol;
        long alsa_vol;

        vol = pa_cvolume_max(&s->virtual_volume);

        if (u->hw_dB_supported) {
            alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
            alsa_vol += u->hw_dB_max;
            alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

            if ((err = snd_mixer_selem_set_playback_dB_all(u->mixer_elem, alsa_vol, 1)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {
            alsa_vol = to_alsa_volume(u, vol);

            if ((err = snd_mixer_selem_set_playback_volume_all(u->mixer_elem, alsa_vol)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    u->hardware_volume = r;

    if (u->hw_dB_supported) {
        char t[PA_CVOLUME_SNPRINT_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&s->soft_volume, &s->virtual_volume, &u->hardware_volume);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->virtual_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->soft_volume));

    } else

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->virtual_volume = r;

    return;

fail:
    pa_log_error("Unable to set volume: %s", snd_strerror(err));
}
1029
1030 static void sink_get_mute_cb(pa_sink *s) {
1031 struct userdata *u = s->userdata;
1032 int err, sw;
1033
1034 pa_assert(u);
1035 pa_assert(u->mixer_elem);
1036
1037 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
1038 pa_log_error("Unable to get switch: %s", snd_strerror(err));
1039 return;
1040 }
1041
1042 s->muted = !sw;
1043 }
1044
1045 static void sink_set_mute_cb(pa_sink *s) {
1046 struct userdata *u = s->userdata;
1047 int err;
1048
1049 pa_assert(u);
1050 pa_assert(u->mixer_elem);
1051
1052 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
1053 pa_log_error("Unable to set switch: %s", snd_strerror(err));
1054 return;
1055 }
1056 }
1057
1058 static void sink_update_requested_latency_cb(pa_sink *s) {
1059 struct userdata *u = s->userdata;
1060 size_t before;
1061 pa_assert(u);
1062
1063 if (!u->pcm_handle)
1064 return;
1065
1066 before = u->hwbuf_unused;
1067 update_sw_params(u);
1068
1069 /* Let's check whether we now use only a smaller part of the
1070 buffer then before. If so, we need to make sure that subsequent
1071 rewinds are relative to the new maxium fill level and not to the
1072 current fill level. Thus, let's do a full rewind once, to clear
1073 things up. */
1074
1075 if (u->hwbuf_unused > before) {
1076 pa_log_debug("Requesting rewind due to latency change.");
1077 pa_sink_request_rewind(s, (size_t) -1);
1078 }
1079 }
1080
/* Execute a rewind requested by the sink core: take back as many of the
 * bytes already queued in the hw buffer as ALSA allows (keeping at
 * least the watermark plus the already-free space untouched), then tell
 * the core how much was actually rewound. Returns 0 on success, -1 on
 * ALSA failure. */
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;
    u->sink->thread_info.rewind_nbytes = 0;

    /* NOTE(review): rewind_nbytes is size_t, so "<= 0" is effectively
     * "== 0" here (and the pa_assert below can never fire) */
    if (rewind_nbytes <= 0)
        goto finish;

    pa_assert(rewind_nbytes > 0);
    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    /* Query how much of the hw buffer is already free */
    snd_pcm_hwsync(u->pcm_handle);
    if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
        pa_log("snd_pcm_avail_update() failed: %s", snd_strerror((int) unused));
        return -1;
    }

    /* Bytes we must not touch: free space plus the wakeup watermark */
    unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", snd_strerror((int) out_frames));
            return -1;
        }
        pa_log_debug("after: %lu", (unsigned long) out_frames);

        /* ALSA may rewind fewer frames than requested */
        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            /* Keep write_count in sync with what is really queued */
            u->write_count -= out_frames * u->frame_size;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            /* Suppress the next "underrun" right after rewinding */
            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

finish:

    /* Tell the core that nothing was rewound */
    pa_sink_process_rewind(u->sink, 0);

    return 0;

}
1147
/* The sink's realtime I/O thread: renders audio, writes it to the ALSA
 * device, and schedules its own wakeups — either via a timer when
 * timer-based scheduling (tsched) is in use, or via the ALSA poll
 * descriptors. Loops until pa_rtpoll_run() signals a regular shutdown
 * (ret == 0) or an unrecoverable error sends us to the fail path. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            /* Process any pending rewind before writing new data */
            if (u->sink->thread_info.rewind_requested)
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* The device is only started once we actually wrote
                 * data for it, so it doesn't underrun right away */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec());
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accomodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* ret == 0 means the rtpoll was asked to quit: regular shutdown */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
                goto fail;
            }

            /* Anything besides POLLOUT (e.g. POLLERR on underrun) is an
             * error condition: try to recover and restart playback from
             * scratch on the next iteration */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                /* In tsched mode we expect timer wakeups, not fd
                 * wakeups, so a POLLOUT here is worth logging */
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1281
1282 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name) {
1283 const char *n;
1284 char *t;
1285
1286 pa_assert(data);
1287 pa_assert(ma);
1288 pa_assert(device_name);
1289
1290 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1291 pa_sink_new_data_set_name(data, n);
1292 data->namereg_fail = TRUE;
1293 return;
1294 }
1295
1296 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1297 data->namereg_fail = TRUE;
1298 else {
1299 n = device_id ? device_id : device_name;
1300 data->namereg_fail = FALSE;
1301 }
1302
1303 t = pa_sprintf_malloc("alsa_output.%s", n);
1304 pa_sink_new_data_set_name(data, t);
1305 pa_xfree(t);
1306 }
1307
/* Probe the ALSA mixer element for hardware volume and mute support and
 * wire up the corresponding sink callbacks. Falls back to software
 * volume/mute when the hardware reports no range, a nonsensical range,
 * or (without dB data) fewer than 4 discrete levels. Also hooks the
 * mixer fd list into the mainloop so external mixer changes reach us.
 *
 * Returns 0 on success (including the no-mixer case), -1 if fd
 * monitoring could not be set up. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* No mixer found for this device: nothing to set up, software
     * volume/mute will be used implicitly */
    if (!u->mixer_handle)
        return 0;

    pa_assert(u->mixer_elem);

    if (snd_mixer_selem_has_playback_volume(u->mixer_elem)) {
        pa_bool_t suitable = FALSE;

        if (snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) < 0)
            pa_log_info("Failed to get volume range. Falling back to software volume control.");
        else if (u->hw_volume_min >= u->hw_volume_max)
            pa_log_warn("Your kernel driver is broken: it reports a volume range from %li to %li which makes no sense.", u->hw_volume_min, u->hw_volume_max);
        else {
            pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
            suitable = TRUE;
        }

        if (suitable) {
            /* ignore_dB= lets the user force the raw-level path when a
             * driver reports bogus dB data */
            if (ignore_dB || snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) < 0)
                pa_log_info("Mixer doesn't support dB information or data is ignored.");
            else {
#ifdef HAVE_VALGRIND_MEMCHECK_H
                /* alsa-lib fills these via ioctl; tell valgrind they
                 * are initialized to silence false positives */
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_min, sizeof(u->hw_dB_min));
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_max, sizeof(u->hw_dB_max));
#endif

                /* dB values from alsa-lib are in 1/100 dB units */
                if (u->hw_dB_min >= u->hw_dB_max)
                    pa_log_warn("Your kernel driver is broken: it reports a volume range from %0.2f dB to %0.2f dB which makes no sense.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
                else {
                    pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
                    u->hw_dB_supported = TRUE;

                    /* If the hardware can amplify above 0 dB, anchor
                     * the sink's base volume so that 100% maps to 0 dB */
                    if (u->hw_dB_max > 0) {
                        u->sink->base_volume = pa_sw_volume_from_dB(- (double) u->hw_dB_max/100.0);
                        pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
                    } else
                        pa_log_info("No particular base volume set, fixing to 0 dB");
                }
            }

            /* Without dB data, a coarse slider (< 4 steps) is worse
             * than software volume — bail out to software control */
            if (!u->hw_dB_supported &&
                u->hw_volume_max - u->hw_volume_min < 3) {

                pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
                suitable = FALSE;
            }
        }

        if (suitable) {
            u->mixer_seperate_channels = pa_alsa_calc_mixer_map(u->mixer_elem, &u->sink->channel_map, u->mixer_map, TRUE) >= 0;

            u->sink->get_volume = sink_get_volume_cb;
            u->sink->set_volume = sink_set_volume_cb;
            u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
            pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");

            if (!u->hw_dB_supported)
                u->sink->n_volume_steps = u->hw_volume_max - u->hw_volume_min + 1;
        } else
            pa_log_info("Using software volume control.");
    }

    if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
    } else
        pa_log_info("Using software mute control.");

    /* Watch the mixer fds from the mainloop so external volume/mute
     * changes trigger mixer_callback */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
    snd_mixer_elem_set_callback_private(u->mixer_elem, u);

    return 0;
}
1392
/* Create a new ALSA sink: parse module arguments, open the PCM device
 * (by profile, by device id, or by device string), create the pa_sink,
 * configure buffer metrics and latency ranges, set up the mixer, and
 * start the realtime I/O thread. Returns the new sink, or NULL on
 * failure (all partially-created state is torn down via userdata_free). */
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, const pa_alsa_profile_info *profile) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_usec_t usec;
    pa_sink_new_data data;

    pa_assert(m);
    pa_assert(ma);

    /* Start from the daemon defaults, then let module arguments
     * override them */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Timer-based scheduling needs hrtimers, otherwise the wakeup
     * granularity is too coarse to be useful */
    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    /* Smoother translates between sound card and system time domains;
     * it stays paused until playback actually starts in thread_func */
    u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE, 5);
    usec = pa_rtclock_usec();
    pa_smoother_set_time_offset(u->smoother, usec);
    pa_smoother_pause(u->smoother, usec);

    /* b/d are in-out: they come back FALSE if the device rejected
     * mmap (b) or the period setup needed for tsched (d) */
    b = use_mmap;
    d = use_tsched;

    if (profile) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_profile(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, profile)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, &profile)))

            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;

    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (profile)
        pa_log_info("Selected configuration '%s' (%s).", profile->description, profile->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    /* Best-effort: mixer_handle/mixer_elem stay NULL if no mixer is
     * found, which setup_mixer() below tolerates */
    pa_alsa_find_mixer_and_elem(u->pcm_handle, &u->mixer_handle, &u->mixer_elem);

    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name);
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (profile) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, profile->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, profile->description);
    }

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    /* Record the final buffer metrics; ALSA may have adjusted
     * nfrags/period_frames during open */
    u->frame_size = frame_size;
    u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    u->tsched_watermark = tsched_watermark;
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    if (use_tsched) {
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    /* Rewinds only make sense with timer-based scheduling */
    u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
    u->sink->thread_info.max_request = u->hwbuf_size;

    pa_sink_set_latency_range(u->sink,
                              !use_tsched ? pa_bytes_to_usec(u->hwbuf_size, &ss) : (pa_usec_t) -1,
                              pa_bytes_to_usec(u->hwbuf_size, &ss));

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    if (use_tsched)
        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    /* NOTE(review): data.volume_is_set/muted_is_set are read after
     * pa_sink_new_data_done(&data) above — presumably done() only
     * frees the proplist and leaves these flags intact; verify against
     * pa_sink_new_data_done()'s implementation. */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    /* Makes the sink visible and starts normal operation */
    pa_sink_put(u->sink);

    return u->sink;

fail:

    /* userdata_free() handles every partially-initialized state,
     * including u == NULL's fields being zeroed by pa_xnew0 */
    userdata_free(u);

    return NULL;
}
1644
/* Tear down a (possibly partially-constructed) userdata and everything
 * it owns. Safe to call from the failure path of pa_alsa_sink_new():
 * every member is checked before it is released. The order matters:
 * unlink the sink first so no new work arrives, then stop the I/O
 * thread, then free the resources the thread was using. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Detach the sink from the core before shutting down the thread */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Synchronously stop the I/O thread; pa_asyncmsgq_send blocks
     * until the thread has processed the shutdown message */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    /* Drop any pending samples before closing the device */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->smoother)
        pa_smoother_free(u->smoother);

    pa_xfree(u->device_name);
    pa_xfree(u);
}
1687
1688 void pa_alsa_sink_free(pa_sink *s) {
1689 struct userdata *u;
1690
1691 pa_sink_assert_ref(s);
1692 pa_assert_se(u = s->userdata);
1693
1694 userdata_free(u);
1695 }