]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
introduce default channel map in addition to the default sample spec
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
38 #include <pulse/i18n.h>
39
40 #include <pulsecore/core.h>
41 #include <pulsecore/module.h>
42 #include <pulsecore/memchunk.h>
43 #include <pulsecore/sink.h>
44 #include <pulsecore/modargs.h>
45 #include <pulsecore/core-util.h>
46 #include <pulsecore/sample-util.h>
47 #include <pulsecore/log.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/thread.h>
50 #include <pulsecore/core-error.h>
51 #include <pulsecore/thread-mq.h>
52 #include <pulsecore/rtpoll.h>
53 #include <pulsecore/rtclock.h>
54 #include <pulsecore/time-smoother.h>
55
56 #include "alsa-util.h"
57 #include "alsa-sink.h"
58
/* Uncomment to get verbose per-iteration timing traces in the log */
/* #define DEBUG_TIMING */

/* ALSA device used when the module arguments name none */
#define DEFAULT_DEVICE "default"
/* Hardware buffer size requested when timer-based scheduling is in use */
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
/* How far ahead of an underrun we want to be woken up */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
/* Lower bounds for the sleep and wakeup intervals in tsched mode */
#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
/* Per-sink instance state, shared between the main thread and the IO thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* IO thread and its communication machinery */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    /* The ALSA playback device; NULL while suspended */
    snd_pcm_t *pcm_handle;

    /* Hardware mixer state for volume/mute control */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    snd_mixer_elem_t *mixer_elem;
    long hw_volume_max, hw_volume_min;   /* raw mixer volume range */
    long hw_dB_max, hw_dB_min;           /* mixer range in 1/100 dB */
    pa_bool_t hw_dB_supported:1;         /* mixer exposes a dB scale */
    pa_bool_t mixer_seperate_channels:1; /* per-channel volume possible (sic: historical spelling) */
    pa_cvolume hardware_volume;          /* last volume read from/written to hw */

    /* Buffer geometry, all in bytes unless noted:
     *   hwbuf_unused    - tail of the hw buffer we deliberately don't fill
     *   tsched_watermark - refill this far before the buffer runs dry
     *   min_sleep/min_wakeup - lower bounds enforced by fix_min_sleep_wakeup() */
    size_t frame_size, fragment_size, hwbuf_size, tsched_watermark, hwbuf_unused, min_sleep, min_wakeup;
    unsigned nfragments;
    pa_memchunk memchunk;  /* staging chunk for the non-mmap write path */

    char *device_name;

    pa_bool_t use_mmap:1, use_tsched:1;  /* mmap IO / timer-based scheduling */

    /* first: no data written since (re)start; after_rewind: a rewind just
     * happened -- both suppress underrun complaints */
    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    /* PA channel index -> ALSA mixer channel id */
    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Latency interpolation */
    pa_smoother *smoother;
    uint64_t write_count;  /* total bytes handed to ALSA (minus rewinds) */
    uint64_t since_start;  /* bytes written since last (re)start */
};
105
106 static void userdata_free(struct userdata *u);
107
108 static void fix_min_sleep_wakeup(struct userdata *u) {
109 size_t max_use, max_use_2;
110
111 pa_assert(u);
112
113 max_use = u->hwbuf_size - u->hwbuf_unused;
114 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
115
116 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
117 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
118
119 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
120 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
121 }
122
123 static void fix_tsched_watermark(struct userdata *u) {
124 size_t max_use;
125 pa_assert(u);
126
127 max_use = u->hwbuf_size - u->hwbuf_unused;
128
129 if (u->tsched_watermark > max_use - u->min_sleep)
130 u->tsched_watermark = max_use - u->min_sleep;
131
132 if (u->tsched_watermark < u->min_wakeup)
133 u->tsched_watermark = u->min_wakeup;
134 }
135
/* Called after an underrun while timer-based scheduling is active. First
 * tries to double the wakeup watermark; if that is already maxed out,
 * doubles the sink's minimal latency instead. */
static void adjust_after_underrun(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark *= 2;
    fix_tsched_watermark(u);

    /* If fix_tsched_watermark() left it changed, the doubling (or at least
     * part of it) took effect and we are done */
    if (old_watermark != u->tsched_watermark) {
        pa_log_notice("Increasing wakeup watermark to %0.2f ms",
                      (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_notice("Increasing minimal latency to %0.2f ms",
                      (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_update_latency_range(u->sink, new_min_latency, u->sink->thread_info.max_latency);
        return;
    }

    /* If we get here both the watermark and the latency are maxed out
     * already -- there is nothing more we can do about the underruns. */
}
167
168 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
169 pa_usec_t usec, wm;
170
171 pa_assert(sleep_usec);
172 pa_assert(process_usec);
173
174 pa_assert(u);
175
176 usec = pa_sink_get_requested_latency_within_thread(u->sink);
177
178 if (usec == (pa_usec_t) -1)
179 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
180
181 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
182
183 if (wm > usec)
184 wm = usec/2;
185
186 *sleep_usec = usec - wm;
187 *process_usec = wm;
188
189 #ifdef DEBUG_TIMING
190 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
191 (unsigned long) (usec / PA_USEC_PER_MSEC),
192 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
193 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
194 #endif
195 }
196
197 static int try_recover(struct userdata *u, const char *call, int err) {
198 pa_assert(u);
199 pa_assert(call);
200 pa_assert(err < 0);
201
202 pa_log_debug("%s: %s", call, snd_strerror(err));
203
204 pa_assert(err != -EAGAIN);
205
206 if (err == -EPIPE)
207 pa_log_debug("%s: Buffer underrun!", call);
208
209 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
210 pa_log("%s: %s", call, snd_strerror(err));
211 return -1;
212 }
213
214 u->first = TRUE;
215 u->since_start = 0;
216 return 0;
217 }
218
/* Given how many bytes the hardware reports as free ('n_bytes'), return how
 * many bytes are still queued for playback. Detects underruns (free space
 * exceeding the whole buffer) and reacts to them. */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
    size_t left_to_play;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size) {
        left_to_play = u->hwbuf_size - n_bytes;

#ifdef DEBUG_TIMING
        pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    } else {
        /* More free space than the buffer holds: we underran */
        left_to_play = 0;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        /* Right after a (re)start or a rewind an underrun is expected, so
         * don't complain or adjust anything in those cases */
        if (!u->first && !u->after_rewind) {

            if (pa_log_ratelimit())
                pa_log_info("Underrun!");

            if (u->use_tsched)
                adjust_after_underrun(u);
        }
    }

    return left_to_play;
}
253
254 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
255 pa_bool_t work_done = TRUE;
256 pa_usec_t max_sleep_usec = 0, process_usec = 0;
257 size_t left_to_play;
258
259 pa_assert(u);
260 pa_sink_assert_ref(u->sink);
261
262 if (u->use_tsched)
263 hw_sleep_time(u, &max_sleep_usec, &process_usec);
264
265 for (;;) {
266 snd_pcm_sframes_t n;
267 size_t n_bytes;
268 int r;
269
270 /* First we determine how many samples are missing to fill the
271 * buffer up to 100% */
272
273 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
274
275 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
276 continue;
277
278 return r;
279 }
280
281 n_bytes = (size_t) n * u->frame_size;
282
283 #ifdef DEBUG_TIMING
284 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
285 #endif
286
287 left_to_play = check_left_to_play(u, n_bytes);
288
289 if (u->use_tsched)
290
291 /* We won't fill up the playback buffer before at least
292 * half the sleep time is over because otherwise we might
293 * ask for more data from the clients then they expect. We
294 * need to guarantee that clients only have to keep around
295 * a single hw buffer length. */
296
297 if (!polled &&
298 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
299 #ifdef DEBUG_TIMING
300 pa_log_debug("Not filling up, because too early.");
301 #endif
302 break;
303 }
304
305 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
306
307 if (polled && pa_log_ratelimit())
308 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write! "
309 "Most likely this is an ALSA driver bug. Please report this issue to the ALSA developers. "
310 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail_update() returned 0."));
311
312 #ifdef DEBUG_TIMING
313 pa_log_debug("Not filling up, because not necessary.");
314 #endif
315 break;
316 }
317
318 n_bytes -= u->hwbuf_unused;
319 polled = FALSE;
320
321 #ifdef DEBUG_TIMING
322 pa_log_debug("Filling up");
323 #endif
324
325 for (;;) {
326 pa_memchunk chunk;
327 void *p;
328 int err;
329 const snd_pcm_channel_area_t *areas;
330 snd_pcm_uframes_t offset, frames;
331 snd_pcm_sframes_t sframes;
332
333 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
334 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
335
336 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
337
338 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
339 continue;
340
341 return r;
342 }
343
344 /* Make sure that if these memblocks need to be copied they will fit into one slot */
345 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
346 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
347
348 /* Check these are multiples of 8 bit */
349 pa_assert((areas[0].first & 7) == 0);
350 pa_assert((areas[0].step & 7)== 0);
351
352 /* We assume a single interleaved memory buffer */
353 pa_assert((areas[0].first >> 3) == 0);
354 pa_assert((areas[0].step >> 3) == u->frame_size);
355
356 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
357
358 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
359 chunk.length = pa_memblock_get_length(chunk.memblock);
360 chunk.index = 0;
361
362 pa_sink_render_into_full(u->sink, &chunk);
363 pa_memblock_unref_fixed(chunk.memblock);
364
365 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
366
367 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
368 continue;
369
370 return r;
371 }
372
373 work_done = TRUE;
374
375 u->write_count += frames * u->frame_size;
376 u->since_start += frames * u->frame_size;
377
378 #ifdef DEBUG_TIMING
379 pa_log_debug("Wrote %lu bytes", (unsigned long) (frames * u->frame_size));
380 #endif
381
382 if ((size_t) frames * u->frame_size >= n_bytes)
383 break;
384
385 n_bytes -= (size_t) frames * u->frame_size;
386 }
387 }
388
389 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
390 return work_done ? 1 : 0;
391 }
392
/* Fill the hardware buffer via plain snd_pcm_writei() (non-mmap path).
 * Renders into the staging chunk u->memchunk and writes from there.
 *
 * Returns 1 if any data was written, 0 if not, negative on unrecoverable
 * error. *sleep_usec receives the suggested sleep time until the next
 * refill. 'polled' indicates we were woken by POLLOUT, which lets us detect
 * bogus driver wakeups. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;

        /* How much room is there in the hardware buffer? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* A POLLOUT wakeup with nothing to write is a driver bug */
            if (polled && pa_log_ratelimit())
                pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write! "
                         "Most likely this is an ALSA driver bug. Please report this issue to the ALSA developers. "
                         "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail_update() returned 0."));

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Render more data if the staging chunk ran dry */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            pa_assert(frames != 0);

            if (PA_UNLIKELY(frames < 0)) {

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            /* Advance the staging chunk past what was actually written */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done ? 1 : 0;
}
500
/* Feed the current playback position together with a matching timestamp
 * into the time smoother, which interpolates latency between updates. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = snd_pcm_delay(u->pcm_handle, &delay)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", snd_strerror(err));
        return;
    }

    /* Prefer the driver-provided high-resolution timestamp if available */
    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", snd_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Bytes actually played = bytes written minus what is still queued */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    /* A negative position can happen briefly after a rewind; clamp it */
    if (PA_UNLIKELY(position < 0))
        position = 0;

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_usec();

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);
}
541
542 static pa_usec_t sink_get_latency(struct userdata *u) {
543 pa_usec_t r;
544 int64_t delay;
545 pa_usec_t now1, now2;
546
547 pa_assert(u);
548
549 now1 = pa_rtclock_usec();
550 now2 = pa_smoother_get(u->smoother, now1);
551
552 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
553
554 r = delay >= 0 ? (pa_usec_t) delay : 0;
555
556 if (u->memchunk.memblock)
557 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
558
559 return r;
560 }
561
562 static int build_pollfd(struct userdata *u) {
563 pa_assert(u);
564 pa_assert(u->pcm_handle);
565
566 if (u->alsa_rtpoll_item)
567 pa_rtpoll_item_free(u->alsa_rtpoll_item);
568
569 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
570 return -1;
571
572 return 0;
573 }
574
575 static int suspend(struct userdata *u) {
576 pa_assert(u);
577 pa_assert(u->pcm_handle);
578
579 pa_smoother_pause(u->smoother, pa_rtclock_usec());
580
581 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
582 * take awfully long with our long buffer sizes today. */
583 snd_pcm_close(u->pcm_handle);
584 u->pcm_handle = NULL;
585
586 if (u->alsa_rtpoll_item) {
587 pa_rtpoll_item_free(u->alsa_rtpoll_item);
588 u->alsa_rtpoll_item = NULL;
589 }
590
591 pa_log_info("Device suspended...");
592
593 return 0;
594 }
595
/* Recompute hwbuf_unused, the tsched limits and ALSA's avail_min from the
 * currently requested latency, and push the new software parameters to
 * ALSA. Returns 0 on success, a negative ALSA error code on failure. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if noone asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at last one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* With tsched we wake ourselves up via timers, so push ALSA's own
         * wakeup threshold out by the planned sleep time */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", snd_strerror(err));
        return err;
    }

    pa_sink_set_max_request(u->sink, u->hwbuf_size - u->hwbuf_unused);

    return 0;
}
650
/* Reopen and reconfigure the PCM device after a suspend. Resume only
 * succeeds if the device still accepts the exact same sample spec, access
 * mode and fragment layout as before; otherwise -1 is returned and the
 * device is left closed. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Drop ALSA's cached global configuration, it may be stale by now */
    snd_config_update_free_global();
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
        goto fail;
    }

    /* Request exactly the configuration we had before the suspend */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Treat the next write like a fresh start */
    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
722
/* Message handler running in IO-thread context. Handles latency queries and
 * sink state transitions (suspend/resume, initial poll setup); everything
 * else is delegated to the generic pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended (no PCM handle) the latency is simply 0 */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: set up polling */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* Coming back from suspend: reopen the device */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
777
778 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
779 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
780
781 pa_assert(u);
782 pa_assert(u->mixer_handle);
783
784 if (mask == SND_CTL_EVENT_MASK_REMOVE)
785 return 0;
786
787 if (mask & SND_CTL_EVENT_MASK_VALUE) {
788 pa_sink_get_volume(u->sink, TRUE);
789 pa_sink_get_mute(u->sink, TRUE);
790 }
791
792 return 0;
793 }
794
795 static pa_volume_t from_alsa_volume(struct userdata *u, long alsa_vol) {
796
797 return (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) /
798 (double) (u->hw_volume_max - u->hw_volume_min));
799 }
800
801 static long to_alsa_volume(struct userdata *u, pa_volume_t vol) {
802 long alsa_vol;
803
804 alsa_vol = (long) round(((double) vol * (double) (u->hw_volume_max - u->hw_volume_min))
805 / PA_VOLUME_NORM) + u->hw_volume_min;
806
807 return PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);
808 }
809
/* Read the current hardware volume from the mixer into the sink. Uses the
 * dB interface when available, the raw volume scale otherwise, and either
 * per-channel values or a single mono value depending on mixer
 * capabilities. If the hardware volume changed externally, the software
 * volume is reset (dB case) so the two don't multiply unexpectedly. */
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        /* Read each channel individually via the mixer_map translation */
        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;

            if (u->hw_dB_supported) {

                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                /* ALSA reports 1/100 dB relative to full scale (hw_dB_max) */
                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);
            } else {

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        long alsa_vol;

        /* Single mono control: read once and spread over all channels */
        if (u->hw_dB_supported) {

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    if (!pa_cvolume_equal(&u->hardware_volume, &r)) {

        s->virtual_volume = u->hardware_volume = r;

        if (u->hw_dB_supported) {
            pa_cvolume reset;

            /* Hmm, so the hardware volume changed, let's reset our software volume */
            pa_cvolume_reset(&reset, s->sample_spec.channels);
            pa_sink_set_soft_volume(s, &reset);
        }
    }

    return;

fail:
    pa_log_error("Unable to read volume: %s", snd_strerror(err));
}
889
/* Write the requested sink volume to the hardware mixer. After setting, the
 * value is read back because the hardware quantizes: in the dB case the
 * residual between requested and achieved volume is then applied as
 * software volume, otherwise the achieved value is simply reported back to
 * the user. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        /* Set and read back every channel individually */
        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;
            pa_volume_t vol;

            vol = s->virtual_volume.values[i];

            if (u->hw_dB_supported) {

                /* Convert to 1/100 dB relative to full scale and clamp */
                alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
                alsa_vol += u->hw_dB_max;
                alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

                if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, 1)) < 0)
                    goto fail;

                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);

            } else {
                alsa_vol = to_alsa_volume(u, vol);

                if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
                    goto fail;

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        pa_volume_t vol;
        long alsa_vol;

        /* Mono control: drive it with the loudest requested channel */
        vol = pa_cvolume_max(&s->virtual_volume);

        if (u->hw_dB_supported) {
            alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
            alsa_vol += u->hw_dB_max;
            alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

            if ((err = snd_mixer_selem_set_playback_dB_all(u->mixer_elem, alsa_vol, 1)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {
            alsa_vol = to_alsa_volume(u, vol);

            if ((err = snd_mixer_selem_set_playback_volume_all(u->mixer_elem, alsa_vol)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    u->hardware_volume = r;

    if (u->hw_dB_supported) {
        char t[PA_CVOLUME_SNPRINT_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&s->soft_volume, &s->virtual_volume, &u->hardware_volume);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->virtual_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->soft_volume));

    } else

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->virtual_volume = r;

    return;

fail:
    pa_log_error("Unable to set volume: %s", snd_strerror(err));
}
1000
1001 static void sink_get_mute_cb(pa_sink *s) {
1002 struct userdata *u = s->userdata;
1003 int err, sw;
1004
1005 pa_assert(u);
1006 pa_assert(u->mixer_elem);
1007
1008 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
1009 pa_log_error("Unable to get switch: %s", snd_strerror(err));
1010 return;
1011 }
1012
1013 s->muted = !sw;
1014 }
1015
1016 static void sink_set_mute_cb(pa_sink *s) {
1017 struct userdata *u = s->userdata;
1018 int err;
1019
1020 pa_assert(u);
1021 pa_assert(u->mixer_elem);
1022
1023 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
1024 pa_log_error("Unable to set switch: %s", snd_strerror(err));
1025 return;
1026 }
1027 }
1028
1029 static void sink_update_requested_latency_cb(pa_sink *s) {
1030 struct userdata *u = s->userdata;
1031 size_t before;
1032 pa_assert(u);
1033
1034 if (!u->pcm_handle)
1035 return;
1036
1037 before = u->hwbuf_unused;
1038 update_sw_params(u);
1039
1040 /* Let's check whether we now use only a smaller part of the
1041 buffer then before. If so, we need to make sure that subsequent
1042 rewinds are relative to the new maxium fill level and not to the
1043 current fill level. Thus, let's do a full rewind once, to clear
1044 things up. */
1045
1046 if (u->hwbuf_unused > before) {
1047 pa_log_debug("Requesting rewind due to latency change.");
1048 pa_sink_request_rewind(s, (size_t) -1);
1049 }
1050 }
1051
/* Rewind the ALSA playback buffer by (up to) the number of bytes the sink
 * requested in thread_info.rewind_nbytes. The rewind is limited so that we
 * never take back the part of the hardware buffer that is still needed to
 * keep playback running (our scheduling watermark plus whatever ALSA
 * reports as already consumed/free).
 *
 * Returns 0 on success -- including the case where nothing could be
 * rewound -- and -1 on a fatal ALSA error. */
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;
    u->sink->thread_info.rewind_nbytes = 0;

    /* rewind_nbytes is unsigned, so this effectively tests for 0, i.e.
     * no rewind was actually requested */
    if (rewind_nbytes <= 0)
        goto finish;

    pa_assert(rewind_nbytes > 0);
    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    /* Sync up with the hardware pointer before asking how much of the
     * buffer is free, so the numbers below are fresh */
    snd_pcm_hwsync(u->pcm_handle);
    if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
        pa_log("snd_pcm_avail_update() failed: %s", snd_strerror((int) unused));
        return -1;
    }

    /* Bytes we must NOT rewind over: the free space ALSA reports plus
     * our safety watermark */
    unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    /* Clamp the request to what the buffer can actually give back */
    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", snd_strerror((int) out_frames));
            return -1;
        }
        pa_log_debug("after: %lu", (unsigned long) out_frames);

        /* ALSA may rewind fewer frames than asked for; out_frames is
         * what it actually gave back */
        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            /* Keep the write counter consistent with the moved hardware
             * pointer so latency accounting stays correct */
            u->write_count -= out_frames * u->frame_size;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

finish:

    /* Inform the core that zero bytes were rewound so it can clean up
     * its rewind state */
    pa_sink_process_rewind(u->sink, 0);

    return 0;

}
1118
/* Main function of the real-time I/O thread: renders audio into the ALSA
 * device, services rewind requests, programs the wakeup timer when
 * timer-based scheduling is in use, and recovers from poll errors
 * (underruns, suspend). Runs until pa_rtpoll_run() signals shutdown or a
 * fatal error forces the module to unload itself. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    /* Elevate to RT priority if the daemon is configured for it */
    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            /* Rewinds must be processed before writing new data, so the
             * rewound region can be re-rendered */
            if (u->sink->thread_info.rewind_requested)
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

/*             pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First successful write: kick off the PCM and restart
                 * the latency smoother */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec());
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accomodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
/*                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

/*                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* A return of 0 means orderly shutdown was requested */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
                goto fail;
            }

            /* Anything besides POLLOUT indicates an error condition
             * (e.g. underrun or suspend) that we try to recover from */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                /* After recovery start over as if freshly opened */
                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1252
1253 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name) {
1254 const char *n;
1255 char *t;
1256
1257 pa_assert(data);
1258 pa_assert(ma);
1259 pa_assert(device_name);
1260
1261 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1262 pa_sink_new_data_set_name(data, n);
1263 data->namereg_fail = TRUE;
1264 return;
1265 }
1266
1267 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1268 data->namereg_fail = TRUE;
1269 else {
1270 n = device_id ? device_id : device_name;
1271 data->namereg_fail = FALSE;
1272 }
1273
1274 t = pa_sprintf_malloc("alsa_output.%s", n);
1275 pa_sink_new_data_set_name(data, t);
1276 pa_xfree(t);
1277 }
1278
/* Probe the ALSA mixer element attached to this sink and hook up hardware
 * volume/mute control when the element is usable. Falls back to software
 * volume when the hardware range is broken, too coarse (< 4 steps without
 * dB data), or when dB information is unavailable or ignored.
 *
 * Returns 0 on success (including the no-mixer and software-fallback
 * cases), -1 only if mixer event monitoring cannot be set up. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* No mixer at all: software volume/mute, nothing to do here */
    if (!u->mixer_handle)
        return 0;

    pa_assert(u->mixer_elem);

    if (snd_mixer_selem_has_playback_volume(u->mixer_elem)) {
        pa_bool_t suitable = FALSE;

        /* First sanity-check the raw volume range */
        if (snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) < 0)
            pa_log_info("Failed to get volume range. Falling back to software volume control.");
        else if (u->hw_volume_min >= u->hw_volume_max)
            pa_log_warn("Your kernel driver is broken: it reports a volume range from %li to %li which makes no sense.", u->hw_volume_min, u->hw_volume_max);
        else {
            pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
            suitable = TRUE;
        }

        if (suitable) {
            /* Then try for dB (decibel) scale information, unless told
             * to ignore it */
            if (ignore_dB || snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) < 0)
                pa_log_info("Mixer doesn't support dB information or data is ignored.");
            else {
#ifdef HAVE_VALGRIND_MEMCHECK_H
                /* alsa-lib fills these via ioctl; tell valgrind they
                 * are initialized */
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_min, sizeof(u->hw_dB_min));
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_max, sizeof(u->hw_dB_max));
#endif

                if (u->hw_dB_min >= u->hw_dB_max)
                    pa_log_warn("Your kernel driver is broken: it reports a volume range from %0.2f dB to %0.2f dB which makes no sense.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
                else {
                    /* ALSA reports dB values in 1/100 dB units */
                    pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
                    u->hw_dB_supported = TRUE;

                    /* If the device can amplify (> 0 dB), anchor the
                     * base volume so that 100% maps to 0 dB */
                    if (u->hw_dB_max > 0) {
                        u->sink->base_volume = pa_sw_volume_from_dB(- (double) u->hw_dB_max/100.0);
                        pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
                    } else
                        pa_log_info("No particular base volume set, fixing to 0 dB");
                }
            }

            /* Without dB data a very coarse range is useless for
             * hardware control */
            if (!u->hw_dB_supported &&
                u->hw_volume_max - u->hw_volume_min < 3) {

                pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
                suitable = FALSE;
            }
        }

        if (suitable) {
            /* Map mixer channels onto the sink's channel map; per-channel
             * control only when the mapping succeeds */
            u->mixer_seperate_channels = pa_alsa_calc_mixer_map(u->mixer_elem, &u->sink->channel_map, u->mixer_map, TRUE) >= 0;

            u->sink->get_volume = sink_get_volume_cb;
            u->sink->set_volume = sink_set_volume_cb;
            u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
            pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");

            if (!u->hw_dB_supported)
                u->sink->n_volume_steps = u->hw_volume_max - u->hw_volume_min + 1;
        } else
            pa_log_info("Using software volume control.");
    }

    /* Mute control is independent of volume control */
    if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
    } else
        pa_log_info("Using software mute control.");

    /* Watch the mixer fds from the main loop so external volume changes
     * (e.g. alsamixer) are picked up */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
    snd_mixer_elem_set_callback_private(u->mixer_elem, u);

    return 0;
}
1363
/* Create a new ALSA sink: parse module arguments, open the PCM device (by
 * profile, by device id, or by device string -- in that order of
 * preference), set up buffer metrics and optional timer-based scheduling,
 * register the pa_sink with the core, attach the mixer and finally start
 * the I/O thread.
 *
 * Returns the new sink, or NULL on failure (all partially acquired
 * resources are released via userdata_free()). */
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, const pa_alsa_profile_info *profile) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_usec_t usec;
    pa_sink_new_data data;

    pa_assert(m);
    pa_assert(ma);

    /* Start from the daemon-wide defaults, then let module arguments
     * override them */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    /* Derive default buffer metrics from the daemon configuration;
     * guarantee at least one frame per fragment */
    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Timer-based scheduling only makes sense with hrtimers, otherwise
     * wakeups would be too coarse to avoid underruns */
    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    /* The smoother maps sound card time to system time for latency
     * interpolation; it stays paused until playback actually starts */
    u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE, 5);
    usec = pa_rtclock_usec();
    pa_smoother_set_time_offset(u->smoother, usec);
    pa_smoother_pause(u->smoother, usec);

    /* b/d are in-out flags: the open helpers clear them if the device
     * cannot do mmap (b) or tsched-compatible setup (d) */
    b = use_mmap;
    d = use_tsched;

    if (profile) {

        /* Card profile given: device_id is mandatory */
        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_profile(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, profile)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        /* No profile: let the helper auto-detect one for this device id */
        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, &profile)))

            goto fail;

    } else {

        /* Fall back to a plain ALSA device string (default "default") */
        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;

    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (profile)
        pa_log_info("Selected configuration '%s' (%s).", profile->description, profile->name);

    /* Honor what the device actually supports */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    pa_alsa_find_mixer_and_elem(u->pcm_handle, &u->mixer_handle, &u->mixer_elem);

    /* Fill in the sink registration data: identity, format and a
     * property list describing the device */
    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name);
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (profile) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, profile->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, profile->description);
    }

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    /* Cache final buffer metrics; frag_size may have been adjusted by
     * the device during open */
    u->frame_size = frame_size;
    u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    u->tsched_watermark = tsched_watermark;
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    if (use_tsched) {
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    /* Rewinding is only possible with timer-based scheduling */
    u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
    u->sink->thread_info.max_request = u->hwbuf_size;

    pa_sink_set_latency_range(u->sink,
                              !use_tsched ? pa_bytes_to_usec(u->hwbuf_size, &ss) : (pa_usec_t) -1,
                              pa_bytes_to_usec(u->hwbuf_size, &ss));

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    if (use_tsched)
        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    return u->sink;

fail:

    /* userdata_free() handles u == NULL and partially initialized state */
    userdata_free(u);

    return NULL;
}
1615
/* Release everything owned by the userdata, in dependency order: first
 * unlink the sink so no new work arrives, then shut down the I/O thread,
 * then drop the remaining references and OS resources. Safe to call on a
 * partially initialized structure (every member is checked) -- this is
 * also the failure path of pa_alsa_sink_new(). */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Detach from the core first so clients stop using the sink */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Ask the I/O thread to exit and wait for it */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    /* Stop the PCM before closing so no stale data keeps playing */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->smoother)
        pa_smoother_free(u->smoother);

    pa_xfree(u->device_name);
    pa_xfree(u);
}
1658
1659 void pa_alsa_sink_free(pa_sink *s) {
1660 struct userdata *u;
1661
1662 pa_sink_assert_ref(s);
1663 pa_assert_se(u = s->userdata);
1664
1665 userdata_free(u);
1666 }