]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
print driver name when we encounter driver bugs
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
38 #include <pulse/i18n.h>
39
40 #include <pulsecore/core.h>
41 #include <pulsecore/module.h>
42 #include <pulsecore/memchunk.h>
43 #include <pulsecore/sink.h>
44 #include <pulsecore/modargs.h>
45 #include <pulsecore/core-util.h>
46 #include <pulsecore/sample-util.h>
47 #include <pulsecore/log.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/thread.h>
50 #include <pulsecore/core-error.h>
51 #include <pulsecore/thread-mq.h>
52 #include <pulsecore/rtpoll.h>
53 #include <pulsecore/rtclock.h>
54 #include <pulsecore/time-smoother.h>
55
56 #include "alsa-util.h"
57 #include "alsa-sink.h"
58
59 /* #define DEBUG_TIMING */
60
61 #define DEFAULT_DEVICE "default"
62 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
63 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
64 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
65 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
66
/* Per-sink instance state of the ALSA playback sink. Allocated by the module
 * setup code (outside this chunk) and released via userdata_free(). */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* IO thread, its message queue, and its realtime poll loop */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    /* ALSA PCM handle; NULL while the device is suspended (see suspend()/unsuspend()) */
    snd_pcm_t *pcm_handle;

    /* Hardware mixer state for volume/mute control */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    snd_mixer_elem_t *mixer_elem;
    long hw_volume_max, hw_volume_min;   /* raw mixer volume range */
    long hw_dB_max, hw_dB_min;           /* mixer range in 1/100 dB, valid if hw_dB_supported */
    pa_bool_t hw_dB_supported:1;
    pa_bool_t mixer_seperate_channels:1; /* (sic) TRUE if the mixer supports per-channel control */
    pa_cvolume hardware_volume;          /* last volume read from / written to the hardware mixer */

    /* All sizes below are in bytes unless stated otherwise */
    size_t frame_size, fragment_size, hwbuf_size, tsched_watermark, hwbuf_unused, min_sleep, min_wakeup;
    unsigned nfragments;
    pa_memchunk memchunk;                /* partially written render chunk for the non-mmap path */

    char *device_name;

    pa_bool_t use_mmap:1, use_tsched:1;  /* mmap IO resp. timer-based scheduling enabled */

    /* first: stream not started yet; after_rewind: last operation was a rewind */
    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    /* Maps PA channel index -> ALSA mixer channel id */
    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Latency interpolation state */
    pa_smoother *smoother;
    uint64_t write_count;  /* total bytes handed to ALSA since the stream was created */
    uint64_t since_start;  /* bytes written since the last (re)start, for underrun heuristics */
};
105
106 static void userdata_free(struct userdata *u);
107
108 static void fix_min_sleep_wakeup(struct userdata *u) {
109 size_t max_use, max_use_2;
110
111 pa_assert(u);
112
113 max_use = u->hwbuf_size - u->hwbuf_unused;
114 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
115
116 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
117 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
118
119 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
120 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
121 }
122
123 static void fix_tsched_watermark(struct userdata *u) {
124 size_t max_use;
125 pa_assert(u);
126
127 max_use = u->hwbuf_size - u->hwbuf_unused;
128
129 if (u->tsched_watermark > max_use - u->min_sleep)
130 u->tsched_watermark = max_use - u->min_sleep;
131
132 if (u->tsched_watermark < u->min_wakeup)
133 u->tsched_watermark = u->min_wakeup;
134 }
135
/* Called after an underrun was detected: make future underruns less likely by
 * first doubling the wakeup watermark, and -- if that is already maxed out --
 * doubling the sink's minimal latency. */
static void adjust_after_underrun(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark *= 2;
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_notice("Increasing wakeup watermark to %0.2f ms",
                      (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_notice("Increasing minimal latency to %0.2f ms",
                      (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_update_latency_range(u->sink, new_min_latency, u->sink->thread_info.max_latency);
        return;
    }

    /* If we reach this point both knobs are at their limits and there is
     * nothing more we can do about the underruns. */
}
167
168 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
169 pa_usec_t usec, wm;
170
171 pa_assert(sleep_usec);
172 pa_assert(process_usec);
173
174 pa_assert(u);
175
176 usec = pa_sink_get_requested_latency_within_thread(u->sink);
177
178 if (usec == (pa_usec_t) -1)
179 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
180
181 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
182
183 if (wm > usec)
184 wm = usec/2;
185
186 *sleep_usec = usec - wm;
187 *process_usec = wm;
188
189 #ifdef DEBUG_TIMING
190 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
191 (unsigned long) (usec / PA_USEC_PER_MSEC),
192 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
193 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
194 #endif
195 }
196
/* Try to recover the PCM from an error state (underrun, suspend, ...).
 * 'call' names the failing ALSA function for logging; 'err' is its negative
 * errno-style return value. Returns 0 on successful recovery, -1 otherwise. */
static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, snd_strerror(err));

    /* The PCM is opened in blocking mode here, so EAGAIN should be impossible */
    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, snd_strerror(err));
        return -1;
    }

    /* After recovery the stream needs to be started anew, so reset the
     * start-up tracking state. */
    u->first = TRUE;
    u->since_start = 0;
    return 0;
}
218
/* Given the free space 'n_bytes' that ALSA reported, return how many bytes
 * are still queued for playback. If ALSA reports more free space than the
 * buffer holds we underran: log it and, in tsched mode, grow the
 * watermark/latency via adjust_after_underrun(). */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
    size_t left_to_play;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size) {
        left_to_play = u->hwbuf_size - n_bytes;

#ifdef DEBUG_TIMING
        pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    } else {
        left_to_play = 0;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        /* Right after start-up or a rewind an apparent underrun is
         * expected; don't count those. */
        if (!u->first && !u->after_rewind) {

            if (pa_log_ratelimit())
                pa_log_info("Underrun!");

            if (u->use_tsched)
                adjust_after_underrun(u);
        }
    }

    return left_to_play;
}
253
254 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
255 pa_bool_t work_done = TRUE;
256 pa_usec_t max_sleep_usec = 0, process_usec = 0;
257 size_t left_to_play;
258
259 pa_assert(u);
260 pa_sink_assert_ref(u->sink);
261
262 if (u->use_tsched)
263 hw_sleep_time(u, &max_sleep_usec, &process_usec);
264
265 for (;;) {
266 snd_pcm_sframes_t n;
267 size_t n_bytes;
268 int r;
269
270 /* First we determine how many samples are missing to fill the
271 * buffer up to 100% */
272
273 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
274
275 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
276 continue;
277
278 return r;
279 }
280
281 n_bytes = (size_t) n * u->frame_size;
282
283 #ifdef DEBUG_TIMING
284 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
285 #endif
286
287 left_to_play = check_left_to_play(u, n_bytes);
288
289 if (u->use_tsched)
290
291 /* We won't fill up the playback buffer before at least
292 * half the sleep time is over because otherwise we might
293 * ask for more data from the clients then they expect. We
294 * need to guarantee that clients only have to keep around
295 * a single hw buffer length. */
296
297 if (!polled &&
298 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
299 #ifdef DEBUG_TIMING
300 pa_log_debug("Not filling up, because too early.");
301 #endif
302 break;
303 }
304
305 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
306
307 if (polled)
308 PA_ONCE_BEGIN {
309 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
310 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
311 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
312 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
313 pa_strnull(dn));
314 pa_xfree(dn);
315 } PA_ONCE_END;
316
317 #ifdef DEBUG_TIMING
318 pa_log_debug("Not filling up, because not necessary.");
319 #endif
320 break;
321 }
322
323 n_bytes -= u->hwbuf_unused;
324 polled = FALSE;
325
326 #ifdef DEBUG_TIMING
327 pa_log_debug("Filling up");
328 #endif
329
330 for (;;) {
331 pa_memchunk chunk;
332 void *p;
333 int err;
334 const snd_pcm_channel_area_t *areas;
335 snd_pcm_uframes_t offset, frames;
336 snd_pcm_sframes_t sframes;
337
338 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
339 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
340
341 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
342
343 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
344 continue;
345
346 return r;
347 }
348
349 /* Make sure that if these memblocks need to be copied they will fit into one slot */
350 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
351 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
352
353 /* Check these are multiples of 8 bit */
354 pa_assert((areas[0].first & 7) == 0);
355 pa_assert((areas[0].step & 7)== 0);
356
357 /* We assume a single interleaved memory buffer */
358 pa_assert((areas[0].first >> 3) == 0);
359 pa_assert((areas[0].step >> 3) == u->frame_size);
360
361 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
362
363 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
364 chunk.length = pa_memblock_get_length(chunk.memblock);
365 chunk.index = 0;
366
367 pa_sink_render_into_full(u->sink, &chunk);
368 pa_memblock_unref_fixed(chunk.memblock);
369
370 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
371
372 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
373 continue;
374
375 return r;
376 }
377
378 work_done = TRUE;
379
380 u->write_count += frames * u->frame_size;
381 u->since_start += frames * u->frame_size;
382
383 #ifdef DEBUG_TIMING
384 pa_log_debug("Wrote %lu bytes", (unsigned long) (frames * u->frame_size));
385 #endif
386
387 if ((size_t) frames * u->frame_size >= n_bytes)
388 break;
389
390 n_bytes -= (size_t) frames * u->frame_size;
391 }
392 }
393
394 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
395 return work_done ? 1 : 0;
396 }
397
/* Fill the ALSA hardware buffer using the classic read/write transfer path
 * (snd_pcm_writei()), buffering partially written chunks in u->memchunk.
 *
 * On return *sleep_usec holds how long the caller may sleep before the next
 * iteration. Returns 1 if any data was written, 0 if not, negative on a
 * non-recoverable error. 'polled' is TRUE when we were woken by POLLOUT. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* POLLOUT woke us although there is nothing to write: a driver
             * bug; complain once, naming the driver. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Refill our staging chunk from the sink if it ran empty */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            pa_assert(frames != 0);

            if (PA_UNLIKELY(frames < 0)) {

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            /* Consume the written part of the staging chunk; keep the rest
             * for the next iteration */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done ? 1 : 0;
}
510
/* Feed a fresh (system time, playback time) sample into the time smoother:
 * playback position = bytes written minus what is still queued in the hw
 * buffer (snd_pcm_delay), timestamped with the driver's htimestamp if it
 * provides one, else with the current rt clock. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = snd_pcm_delay(u->pcm_handle, &delay)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", snd_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", snd_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    /* Clamp: right after a recovery the reported delay may exceed what we
     * have written */
    if (PA_UNLIKELY(position < 0))
        position = 0;

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_usec();

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);
}
551
552 static pa_usec_t sink_get_latency(struct userdata *u) {
553 pa_usec_t r;
554 int64_t delay;
555 pa_usec_t now1, now2;
556
557 pa_assert(u);
558
559 now1 = pa_rtclock_usec();
560 now2 = pa_smoother_get(u->smoother, now1);
561
562 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
563
564 r = delay >= 0 ? (pa_usec_t) delay : 0;
565
566 if (u->memchunk.memblock)
567 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
568
569 return r;
570 }
571
572 static int build_pollfd(struct userdata *u) {
573 pa_assert(u);
574 pa_assert(u->pcm_handle);
575
576 if (u->alsa_rtpoll_item)
577 pa_rtpoll_item_free(u->alsa_rtpoll_item);
578
579 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
580 return -1;
581
582 return 0;
583 }
584
/* Suspend the sink: pause the smoother, close the PCM handle entirely and
 * drop its poll item. unsuspend() reopens the device later. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_usec());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}
605
/* Recompute hwbuf_unused, the tsched margins/watermark and the PCM's
 * avail_min from the currently requested latency, then push the sw params
 * to ALSA and update the sink's max request size. Returns 0 on success or
 * a negative ALSA error code. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if nobody asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* Push avail_min out so ALSA doesn't wake us before our planned
         * sleep time has passed */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", snd_strerror(err));
        return err;
    }

    pa_sink_set_max_request(u->sink, u->hwbuf_size - u->hwbuf_unused);

    return 0;
}
660
/* Resume from suspend: reopen the PCM and verify that the exact same
 * hardware configuration (access mode, sample spec, fragment layout) can be
 * restored. Fails -- closing the handle again -- if any of that changed
 * while we were suspended. Returns 0 on success, -1 on failure. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    snd_config_update_free_global();
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
        goto fail;
    }

    /* Ask for exactly the configuration we had before the suspend */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Stream must be restarted from scratch */
    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
732
/* Message handler running in the IO thread: answers latency queries and
 * reacts to sink state changes (suspend/resume, initial poll setup).
 * Everything else is delegated to the generic pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no PCM handle; report 0 latency */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: install the poll fds */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    /* Nothing to do for these states */
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
787
788 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
789 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
790
791 pa_assert(u);
792 pa_assert(u->mixer_handle);
793
794 if (mask == SND_CTL_EVENT_MASK_REMOVE)
795 return 0;
796
797 if (mask & SND_CTL_EVENT_MASK_VALUE) {
798 pa_sink_get_volume(u->sink, TRUE);
799 pa_sink_get_mute(u->sink, TRUE);
800 }
801
802 return 0;
803 }
804
805 static pa_volume_t from_alsa_volume(struct userdata *u, long alsa_vol) {
806
807 return (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) /
808 (double) (u->hw_volume_max - u->hw_volume_min));
809 }
810
811 static long to_alsa_volume(struct userdata *u, pa_volume_t vol) {
812 long alsa_vol;
813
814 alsa_vol = (long) round(((double) vol * (double) (u->hw_volume_max - u->hw_volume_min))
815 / PA_VOLUME_NORM) + u->hw_volume_min;
816
817 return PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);
818 }
819
/* Read the current volume from the hardware mixer into s->virtual_volume.
 * Uses per-channel reads if the mixer supports them, otherwise a single
 * mono read applied to all channels. dB values are converted treating the
 * mixer's dB maximum as the 0 dB reference. If the hardware volume changed,
 * the software volume is reset (dB case) so the stages stay consistent. */
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;

            if (u->hw_dB_supported) {

                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                /* alsa_vol is in 1/100 dB; hw_dB_max is the 0 dB reference */
                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);
            } else {

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        long alsa_vol;

        if (u->hw_dB_supported) {

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    if (!pa_cvolume_equal(&u->hardware_volume, &r)) {

        s->virtual_volume = u->hardware_volume = r;

        if (u->hw_dB_supported) {
            pa_cvolume reset;

            /* Hmm, so the hardware volume changed, let's reset our software volume */
            pa_cvolume_reset(&reset, s->sample_spec.channels);
            pa_sink_set_soft_volume(s, &reset);
        }
    }

    return;

fail:
    pa_log_error("Unable to read volume: %s", snd_strerror(err));
}
899
/* Push s->virtual_volume to the hardware mixer, then read back what the
 * hardware actually accepted. With dB support, the residual between the
 * requested and the achieved volume is compensated in software
 * (s->soft_volume); without it, virtual_volume is snapped to what the
 * hardware can do. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;
            pa_volume_t vol;

            vol = s->virtual_volume.values[i];

            if (u->hw_dB_supported) {

                /* Convert to 1/100 dB relative to the mixer's dB maximum */
                alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
                alsa_vol += u->hw_dB_max;
                alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

                if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, 1)) < 0)
                    goto fail;

                /* Read back what the hardware actually applied */
                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);

            } else {
                alsa_vol = to_alsa_volume(u, vol);

                if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
                    goto fail;

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        pa_volume_t vol;
        long alsa_vol;

        /* Only one hardware control: drive it from the loudest channel */
        vol = pa_cvolume_max(&s->virtual_volume);

        if (u->hw_dB_supported) {
            alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
            alsa_vol += u->hw_dB_max;
            alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

            if ((err = snd_mixer_selem_set_playback_dB_all(u->mixer_elem, alsa_vol, 1)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {
            alsa_vol = to_alsa_volume(u, vol);

            if ((err = snd_mixer_selem_set_playback_volume_all(u->mixer_elem, alsa_vol)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    u->hardware_volume = r;

    if (u->hw_dB_supported) {
        char t[PA_CVOLUME_SNPRINT_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&s->soft_volume, &s->virtual_volume, &u->hardware_volume);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->virtual_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->soft_volume));

    } else

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->virtual_volume = r;

    return;

fail:
    pa_log_error("Unable to set volume: %s", snd_strerror(err));
}
1010
1011 static void sink_get_mute_cb(pa_sink *s) {
1012 struct userdata *u = s->userdata;
1013 int err, sw;
1014
1015 pa_assert(u);
1016 pa_assert(u->mixer_elem);
1017
1018 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
1019 pa_log_error("Unable to get switch: %s", snd_strerror(err));
1020 return;
1021 }
1022
1023 s->muted = !sw;
1024 }
1025
1026 static void sink_set_mute_cb(pa_sink *s) {
1027 struct userdata *u = s->userdata;
1028 int err;
1029
1030 pa_assert(u);
1031 pa_assert(u->mixer_elem);
1032
1033 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
1034 pa_log_error("Unable to set switch: %s", snd_strerror(err));
1035 return;
1036 }
1037 }
1038
/* Called when the requested latency changed: recompute the software
 * parameters and, if the usable buffer shrank, force a full rewind so the
 * fill level matches the new limit. */
static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    size_t before;
    pa_assert(u);

    /* Nothing to reconfigure while suspended */
    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
     buffer than before. If so, we need to make sure that subsequent
     rewinds are relative to the new maximum fill level and not to the
     current fill level. Thus, let's do a full rewind once, to clear
     things up. */

    if (u->hwbuf_unused > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}
1061
/* Rewind the ALSA write pointer by up to the amount the sink requested,
 * limited so we never rewind into the watermark or the part ALSA already
 * reports as free. Always completes the rewind cycle by calling
 * pa_sink_process_rewind(). Returns 0 on success, -1 on ALSA errors. */
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;
    u->sink->thread_info.rewind_nbytes = 0;

    /* rewind_nbytes is unsigned, so this tests for 0 */
    if (rewind_nbytes <= 0)
        goto finish;

    pa_assert(rewind_nbytes > 0);
    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    /* Sync with the hardware pointer before querying the fill level */
    snd_pcm_hwsync(u->pcm_handle);
    if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
        pa_log("snd_pcm_avail_update() failed: %s", snd_strerror((int) unused));
        return -1;
    }

    /* Keep the watermark plus the already-free space untouched */
    unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", snd_strerror((int) out_frames));
            return -1;
        }
        pa_log_debug("after: %lu", (unsigned long) out_frames);

        /* ALSA may rewind fewer frames than requested */
        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= out_frames * u->frame_size;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

finish:

    /* Complete the rewind cycle even if nothing was rewound */
    pa_sink_process_rewind(u->sink, 0);

    return 0;

}
1128
/* The real-time I/O thread: renders audio from the sink into the ALSA
 * device, processes rewind requests, and schedules its own wakeups —
 * via high-resolution timers in tsched mode, otherwise via the ALSA
 * poll descriptors. Runs until pa_rtpoll_run() signals an orderly
 * shutdown or an unrecoverable error occurs. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    /* Acquire RT scheduling if the daemon is configured for it */
    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            /* Handle a pending rewind before writing new data */
            if (u->sink->thread_info.rewind_requested)
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First write after (re)start: kick off the stream and
                 * resume the clock smoother */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec());
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* A return of 0 means an orderly shutdown was requested */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
                goto fail;
            }

            /* Anything besides POLLOUT indicates an error/suspend
             * condition: try to recover and restart playback from
             * scratch (u->first forces a new snd_pcm_start()) */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1262
1263 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name) {
1264 const char *n;
1265 char *t;
1266
1267 pa_assert(data);
1268 pa_assert(ma);
1269 pa_assert(device_name);
1270
1271 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1272 pa_sink_new_data_set_name(data, n);
1273 data->namereg_fail = TRUE;
1274 return;
1275 }
1276
1277 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1278 data->namereg_fail = TRUE;
1279 else {
1280 n = device_id ? device_id : device_name;
1281 data->namereg_fail = FALSE;
1282 }
1283
1284 t = pa_sprintf_malloc("alsa_output.%s", n);
1285 pa_sink_new_data_set_name(data, t);
1286 pa_xfree(t);
1287 }
1288
/* Probe the ALSA mixer element (if any) and hook hardware volume and
 * mute control up to the sink, falling back to software control when
 * the mixer reports no usable range. Returns 0 on success (including
 * the no-mixer case) and -1 only if mixer fd monitoring fails. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* No mixer found for this device: software volume/mute only */
    if (!u->mixer_handle)
        return 0;

    pa_assert(u->mixer_elem);

    if (snd_mixer_selem_has_playback_volume(u->mixer_elem)) {
        pa_bool_t suitable = FALSE;

        if (snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) < 0)
            pa_log_info("Failed to get volume range. Falling back to software volume control.");
        else if (u->hw_volume_min >= u->hw_volume_max)
            pa_log_warn("Your kernel driver is broken: it reports a volume range from %li to %li which makes no sense.", u->hw_volume_min, u->hw_volume_max);
        else {
            pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
            suitable = TRUE;
        }

        if (suitable) {
            /* dB range values from ALSA are in 1/100 dB */
            if (ignore_dB || snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) < 0)
                pa_log_info("Mixer doesn't support dB information or data is ignored.");
            else {
#ifdef HAVE_VALGRIND_MEMCHECK_H
                /* alsa-lib doesn't always initialize these fully, which
                 * makes valgrind complain — mark them defined */
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_min, sizeof(u->hw_dB_min));
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_max, sizeof(u->hw_dB_max));
#endif

                if (u->hw_dB_min >= u->hw_dB_max)
                    pa_log_warn("Your kernel driver is broken: it reports a volume range from %0.2f dB to %0.2f dB which makes no sense.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
                else {
                    pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
                    u->hw_dB_supported = TRUE;

                    /* If the mixer can amplify (max > 0 dB), lower the
                     * base volume so that 100% maps to 0 dB instead of
                     * to the amplified maximum */
                    if (u->hw_dB_max > 0) {
                        u->sink->base_volume = pa_sw_volume_from_dB(- (double) u->hw_dB_max/100.0);
                        pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
                    } else
                        pa_log_info("No particular base volume set, fixing to 0 dB");
                }
            }

            /* Without dB data, fewer than 4 discrete steps
             * (max - min < 3) is too coarse for hardware volume */
            if (!u->hw_dB_supported &&
                u->hw_volume_max - u->hw_volume_min < 3) {

                pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
                suitable = FALSE;
            }
        }

        if (suitable) {
            u->mixer_seperate_channels = pa_alsa_calc_mixer_map(u->mixer_elem, &u->sink->channel_map, u->mixer_map, TRUE) >= 0;

            u->sink->get_volume = sink_get_volume_cb;
            u->sink->set_volume = sink_set_volume_cb;
            u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
            pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");

            if (!u->hw_dB_supported)
                u->sink->n_volume_steps = u->hw_volume_max - u->hw_volume_min + 1;
        } else
            pa_log_info("Using software volume control.");
    }

    if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
    } else
        pa_log_info("Using software mute control.");

    /* Watch the mixer fds from the main loop so that externally made
     * volume/mute changes are picked up via mixer_callback */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
    snd_mixer_elem_set_callback_private(u->mixer_elem, u);

    return 0;
}
1373
/* Create and fully set up an ALSA playback sink.
 *
 * Parses sample spec, buffer metrics and feature flags from the module
 * arguments, opens the PCM device (by profile, by device_id, or by an
 * explicit device string — in that order), negotiates mmap and
 * timer-based scheduling with graceful fallback, creates the pa_sink,
 * configures latency ranges, hooks up the mixer and finally spawns the
 * real-time I/O thread.
 *
 * Returns the new sink, or NULL on failure — in which case any
 * partially constructed state is torn down via userdata_free(). */
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, const pa_alsa_profile_info *profile) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_usec_t usec;
    pa_sink_new_data data;

    pa_assert(m);
    pa_assert(ma);

    /* Start from the daemon defaults, then let module arguments override */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    /* Default buffer metrics, overridable below via module arguments */
    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Timer-based scheduling only makes sense with hrtimers */
    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    /* The smoother tracks the sound card clock against the system
     * clock; keep it paused until playback actually starts */
    u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE, 5);
    usec = pa_rtclock_usec();
    pa_smoother_set_time_offset(u->smoother, usec);
    pa_smoother_pause(u->smoother, usec);

    /* b and d come back as what the device actually supports */
    b = use_mmap;
    d = use_tsched;

    if (profile) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_profile(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, profile)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, &profile)))

            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;

    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (profile)
        pa_log_info("Selected configuration '%s' (%s).", profile->description, profile->name);

    /* Fall back gracefully if the device rejected mmap or the settings
     * required for timer-based scheduling */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    pa_alsa_find_mixer_and_elem(u->pcm_handle, &u->mixer_handle, &u->mixer_elem);

    /* Build the sink's construction data: identity, properties, format */
    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name);
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (profile) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, profile->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, profile->description);
    }

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    /* Record the metrics the device actually accepted (period size may
     * differ from what we asked for) */
    u->frame_size = frame_size;
    u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    u->tsched_watermark = tsched_watermark;
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    if (use_tsched) {
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    /* Rewinding only works in timer-based scheduling mode */
    u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
    u->sink->thread_info.max_request = u->hwbuf_size;

    /* With tsched the latency is dynamic; otherwise it is fixed to the
     * hardware buffer size */
    pa_sink_set_latency_range(u->sink,
                              !use_tsched ? pa_bytes_to_usec(u->hwbuf_size, &ss) : (pa_usec_t) -1,
                              pa_bytes_to_usec(u->hwbuf_size, &ss));

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    if (use_tsched)
        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings: push user-requested values to the
     * hardware, or read the hardware state back into the sink */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    return u->sink;

fail:

    userdata_free(u);

    return NULL;
}
1625
/* Tear down a userdata in an order that is safe even for partially
 * initialized state (every member is checked before it is released).
 * Used both on module unload and on failed initialization. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Unlink first so no new data is routed to the sink while the I/O
     * thread is being shut down */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Ask the I/O thread to shut down and wait for it to finish */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    /* Stop the stream before closing the device */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->smoother)
        pa_smoother_free(u->smoother);

    pa_xfree(u->device_name);
    pa_xfree(u);
}
1668
1669 void pa_alsa_sink_free(pa_sink *s) {
1670 struct userdata *u;
1671
1672 pa_sink_assert_ref(s);
1673 pa_assert_se(u = s->userdata);
1674
1675 userdata_free(u);
1676 }