]> code.delx.au - pulseaudio/blob - src/modules/module-alsa-sink.c
don't spam us with wakeup msgs in non-tsched mode
[pulseaudio] / src / modules / module-alsa-sink.c
1 /* $Id$ */
2
3 /***
4 This file is part of PulseAudio.
5
6 Copyright 2004-2006 Lennart Poettering
7 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
8
9 PulseAudio is free software; you can redistribute it and/or modify
10 it under the terms of the GNU Lesser General Public License as published
11 by the Free Software Foundation; either version 2 of the License,
12 or (at your option) any later version.
13
14 PulseAudio is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
18
19 You should have received a copy of the GNU Lesser General Public License
20 along with PulseAudio; if not, write to the Free Software
21 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 USA.
23 ***/
24
25 #ifdef HAVE_CONFIG_H
26 #include <config.h>
27 #endif
28
29 #include <stdio.h>
30
31 #include <asoundlib.h>
32
33 #include <pulse/xmalloc.h>
34 #include <pulse/util.h>
35 #include <pulse/timeval.h>
36
37 #include <pulsecore/core.h>
38 #include <pulsecore/module.h>
39 #include <pulsecore/memchunk.h>
40 #include <pulsecore/sink.h>
41 #include <pulsecore/modargs.h>
42 #include <pulsecore/core-util.h>
43 #include <pulsecore/sample-util.h>
44 #include <pulsecore/log.h>
45 #include <pulsecore/macro.h>
46 #include <pulsecore/thread.h>
47 #include <pulsecore/core-error.h>
48 #include <pulsecore/thread-mq.h>
49 #include <pulsecore/rtpoll.h>
50 #include <pulsecore/rtclock.h>
51 #include <pulsecore/time-smoother.h>
52
53 #include "alsa-util.h"
54 #include "module-alsa-sink-symdef.h"
55
56 PA_MODULE_AUTHOR("Lennart Poettering");
57 PA_MODULE_DESCRIPTION("ALSA Sink");
58 PA_MODULE_VERSION(PACKAGE_VERSION);
59 PA_MODULE_LOAD_ONCE(FALSE);
60 PA_MODULE_USAGE(
61 "sink_name=<name for the sink> "
62 "device=<ALSA device> "
63 "device_id=<ALSA card index> "
64 "format=<sample format> "
65 "rate=<sample rate> "
66 "channels=<number of channels> "
67 "channel_map=<channel map> "
68 "fragments=<number of fragments> "
69 "fragment_size=<fragment size> "
70 "mmap=<enable memory mapping?> "
71 "tsched=<enable system timer based scheduling mode?> "
72 "tsched_buffer_size=<buffer size when using timer based scheduling> "
73 "tsched_buffer_watermark=<lower fill watermark> "
74 "mixer_reset=<reset hw volume and mute settings to sane defaults when falling back to software?>");
75
/* Module arguments accepted by pa__init(); must match PA_MODULE_USAGE
 * above. NULL-terminated for pa_modargs_new(). */
static const char* const valid_modargs[] = {
    "sink_name",
    "device",
    "device_id",
    "format",
    "rate",
    "channels",
    "channel_map",
    "fragments",
    "fragment_size",
    "mmap",
    "tsched",
    "tsched_buffer_size",
    "tsched_buffer_watermark",
    "mixer_reset",
    NULL
};
93
94 #define DEFAULT_DEVICE "default"
95 #define DEFAULT_TSCHED_BUFFER_USEC (5*PA_USEC_PER_SEC) /* 5s */
96 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
97 #define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
98 #define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
99
/* Per-module instance state, shared between the main thread and the
 * real-time IO thread (thread_func). */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* IO thread plus its message queue and poll loop */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    /* ALSA PCM handle; NULL while the device is suspended */
    snd_pcm_t *pcm_handle;

    /* Mixer state for hardware volume/mute control */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    snd_mixer_elem_t *mixer_elem;
    long hw_volume_max, hw_volume_min;   /* raw integer volume range */
    long hw_dB_max, hw_dB_min;           /* dB range, in 1/100 dB */
    pa_bool_t hw_dB_supported;           /* cleared permanently on first dB API failure */

    /* Buffer metrics, in bytes unless the name says otherwise */
    size_t frame_size, fragment_size, hwbuf_size, tsched_watermark;
    unsigned nfragments;
    pa_memchunk memchunk;                /* leftover render data for the snd_pcm_writei() path */

    char *device_name;

    pa_bool_t use_mmap, use_tsched;

    /* first: playback has not been started yet (or was restarted after
     * recovery/resume); after_rewind: set right after a successful rewind.
     * Both suppress the underrun diagnostics in check_left_to_play(). */
    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Latency interpolation state */
    pa_smoother *smoother;
    int64_t frame_index;                 /* frames handed to ALSA since stream start */
    uint64_t since_start;                /* bytes written since the last (re)start */

    /* Tail of the hw buffer we deliberately leave unused to honour the
     * requested latency; see update_sw_params() */
    snd_pcm_sframes_t hwbuf_unused_frames;
};
138
139 static void fix_tsched_watermark(struct userdata *u) {
140 size_t max_use;
141 size_t min_sleep, min_wakeup;
142 pa_assert(u);
143
144 max_use = u->hwbuf_size - u->hwbuf_unused_frames * u->frame_size;
145
146 min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
147 min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
148
149 if (min_sleep > max_use/2)
150 min_sleep = pa_frame_align(max_use/2, &u->sink->sample_spec);
151 if (min_sleep < u->frame_size)
152 min_sleep = u->frame_size;
153
154 if (min_wakeup > max_use/2)
155 min_wakeup = pa_frame_align(max_use/2, &u->sink->sample_spec);
156 if (min_wakeup < u->frame_size)
157 min_wakeup = u->frame_size;
158
159 if (u->tsched_watermark > max_use-min_sleep)
160 u->tsched_watermark = max_use-min_sleep;
161
162 if (u->tsched_watermark < min_wakeup)
163 u->tsched_watermark = min_wakeup;
164 }
165
166 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
167 pa_usec_t usec, wm;
168
169 pa_assert(sleep_usec);
170 pa_assert(process_usec);
171
172 pa_assert(u);
173
174 usec = pa_sink_get_requested_latency_within_thread(u->sink);
175
176 if (usec == (pa_usec_t) -1)
177 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
178
179 /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */
180
181 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
182
183 if (usec >= wm) {
184 *sleep_usec = usec - wm;
185 *process_usec = wm;
186 } else
187 *process_usec = *sleep_usec = usec / 2;
188
189 /* pa_log_debug("after watermark: %u ms", (unsigned) (*sleep_usec / PA_USEC_PER_MSEC)); */
190 }
191
192 static int try_recover(struct userdata *u, const char *call, int err) {
193 pa_assert(u);
194 pa_assert(call);
195 pa_assert(err < 0);
196
197 pa_log_debug("%s: %s", call, snd_strerror(err));
198
199 pa_assert(err != -EAGAIN);
200
201 if (err == -EPIPE)
202 pa_log_debug("%s: Buffer underrun!", call);
203
204 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0) {
205 u->first = TRUE;
206 u->since_start = 0;
207 return 0;
208 }
209
210 pa_log("%s: %s", call, snd_strerror(err));
211 return -1;
212 }
213
214 static size_t check_left_to_play(struct userdata *u, snd_pcm_sframes_t n) {
215 size_t left_to_play;
216
217 if (n*u->frame_size < u->hwbuf_size)
218 left_to_play = u->hwbuf_size - (n*u->frame_size);
219 else
220 left_to_play = 0;
221
222 if (left_to_play > 0) {
223 /* pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC); */
224 } else if (!u->first && !u->after_rewind) {
225 pa_log_info("Underrun!");
226
227 if (u->use_tsched) {
228 size_t old_watermark = u->tsched_watermark;
229
230 u->tsched_watermark *= 2;
231 fix_tsched_watermark(u);
232
233 if (old_watermark != u->tsched_watermark)
234 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
235 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
236 }
237 }
238
239 return left_to_play;
240 }
241
242 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec) {
243 int work_done = 0;
244 pa_usec_t max_sleep_usec, process_usec;
245 size_t left_to_play;
246
247 pa_assert(u);
248 pa_sink_assert_ref(u->sink);
249
250 if (u->use_tsched)
251 hw_sleep_time(u, &max_sleep_usec, &process_usec);
252
253 for (;;) {
254 snd_pcm_sframes_t n;
255 int r;
256
257 snd_pcm_hwsync(u->pcm_handle);
258
259 /* First we determine how many samples are missing to fill the
260 * buffer up to 100% */
261
262 if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {
263
264 if ((r = try_recover(u, "snd_pcm_avail_update", n)) == 0)
265 continue;
266
267 return r;
268 }
269
270 left_to_play = check_left_to_play(u, n);
271
272 if (u->use_tsched)
273
274 /* We won't fill up the playback buffer before at least
275 * half the sleep time is over because otherwise we might
276 * ask for more data from the clients then they expect. We
277 * need to guarantee that clients only have to keep around
278 * a single hw buffer length. */
279
280 if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > max_sleep_usec/2)
281 break;
282
283 if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
284 break;
285
286 n -= u->hwbuf_unused_frames;
287
288 /* pa_log_debug("Filling up"); */
289
290 for (;;) {
291 pa_memchunk chunk;
292 void *p;
293 int err;
294 const snd_pcm_channel_area_t *areas;
295 snd_pcm_uframes_t offset, frames = (snd_pcm_uframes_t) n;
296
297 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
298
299 if (PA_UNLIKELY((err = snd_pcm_mmap_begin(u->pcm_handle, &areas, &offset, &frames)) < 0)) {
300
301 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
302 continue;
303
304 return r;
305 }
306
307 /* Make sure that if these memblocks need to be copied they will fit into one slot */
308 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
309 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
310
311 /* Check these are multiples of 8 bit */
312 pa_assert((areas[0].first & 7) == 0);
313 pa_assert((areas[0].step & 7)== 0);
314
315 /* We assume a single interleaved memory buffer */
316 pa_assert((areas[0].first >> 3) == 0);
317 pa_assert((areas[0].step >> 3) == u->frame_size);
318
319 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
320
321 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
322 chunk.length = pa_memblock_get_length(chunk.memblock);
323 chunk.index = 0;
324
325 pa_sink_render_into_full(u->sink, &chunk);
326
327 /* FIXME: Maybe we can do something to keep this memory block
328 * a little bit longer around? */
329 pa_memblock_unref_fixed(chunk.memblock);
330
331 if (PA_UNLIKELY((err = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
332
333 if ((r = try_recover(u, "snd_pcm_mmap_commit", err)) == 0)
334 continue;
335
336 return r;
337 }
338
339 work_done = 1;
340
341 u->frame_index += frames;
342 u->since_start += frames * u->frame_size;
343
344 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
345
346 if (frames >= (snd_pcm_uframes_t) n)
347 break;
348
349 n -= frames;
350 }
351 }
352
353 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
354 return work_done;
355 }
356
357 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec) {
358 int work_done = 0;
359 pa_usec_t max_sleep_usec, process_usec;
360 size_t left_to_play;
361
362 pa_assert(u);
363 pa_sink_assert_ref(u->sink);
364
365 if (u->use_tsched)
366 hw_sleep_time(u, &max_sleep_usec, &process_usec);
367
368 for (;;) {
369 snd_pcm_sframes_t n;
370 int r;
371
372 snd_pcm_hwsync(u->pcm_handle);
373
374 if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {
375
376 if ((r = try_recover(u, "snd_pcm_avail_update", n)) == 0)
377 continue;
378
379 return r;
380 }
381
382 left_to_play = check_left_to_play(u, n);
383
384 if (u->use_tsched)
385
386 /* We won't fill up the playback buffer before at least
387 * half the sleep time is over because otherwise we might
388 * ask for more data from the clients then they expect. We
389 * need to guarantee that clients only have to keep around
390 * a single hw buffer length. */
391
392 if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > max_sleep_usec/2)
393 break;
394
395 if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
396 break;
397
398 n -= u->hwbuf_unused_frames;
399
400 for (;;) {
401 snd_pcm_sframes_t frames;
402 void *p;
403
404 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
405
406 if (u->memchunk.length <= 0)
407 pa_sink_render(u->sink, n * u->frame_size, &u->memchunk);
408
409 pa_assert(u->memchunk.length > 0);
410
411 frames = u->memchunk.length / u->frame_size;
412
413 if (frames > n)
414 frames = n;
415
416 p = pa_memblock_acquire(u->memchunk.memblock);
417 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, frames);
418 pa_memblock_release(u->memchunk.memblock);
419
420 pa_assert(frames != 0);
421
422 if (PA_UNLIKELY(frames < 0)) {
423
424 if ((r = try_recover(u, "snd_pcm_writei", n)) == 0)
425 continue;
426
427 return r;
428 }
429
430 u->memchunk.index += frames * u->frame_size;
431 u->memchunk.length -= frames * u->frame_size;
432
433 if (u->memchunk.length <= 0) {
434 pa_memblock_unref(u->memchunk.memblock);
435 pa_memchunk_reset(&u->memchunk);
436 }
437
438 work_done = 1;
439
440 u->frame_index += frames;
441 u->since_start += frames * u->frame_size;
442
443 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
444
445 if (frames >= n)
446 break;
447
448 n -= frames;
449 }
450 }
451
452 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
453 return work_done;
454 }
455
/* Feed a fresh (system time, playback time) sample pair into the time
 * smoother so sink_get_latency() can interpolate the playback position
 * between wakeups. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t frames;
    int err;
    pa_usec_t now1, now2;
    /* struct timeval timestamp; */
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    /* Sync with the hardware pointer before querying the delay */
    snd_pcm_hwsync(u->pcm_handle);
    snd_pcm_avail_update(u->pcm_handle);

    /* Alternative implementation via snd_pcm_status(), kept for reference: */
    /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
    /* pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
    /* return; */
    /* } */

    /* delay = snd_pcm_status_get_delay(status); */

    if (PA_UNLIKELY((err = snd_pcm_delay(u->pcm_handle, &delay)) < 0)) {
        pa_log("Failed to query DSP status data: %s", snd_strerror(err));
        return;
    }

    /* Frames actually played so far = frames written minus what is
     * still sitting in the hardware buffer */
    frames = u->frame_index - delay;

    /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */

    /* snd_pcm_status_get_tstamp(status, &timestamp); */
    /* pa_rtclock_from_wallclock(&timestamp); */
    /* now1 = pa_timeval_load(&timestamp); */

    now1 = pa_rtclock_usec();
    now2 = pa_bytes_to_usec(frames * u->frame_size, &u->sink->sample_spec);
    pa_smoother_put(u->smoother, now1, now2);
}
498
499 static pa_usec_t sink_get_latency(struct userdata *u) {
500 pa_usec_t r = 0;
501 int64_t delay;
502 pa_usec_t now1, now2;
503
504 pa_assert(u);
505
506 now1 = pa_rtclock_usec();
507 now2 = pa_smoother_get(u->smoother, now1);
508
509 delay = (int64_t) pa_bytes_to_usec(u->frame_index * u->frame_size, &u->sink->sample_spec) - (int64_t) now2;
510
511 if (delay > 0)
512 r = (pa_usec_t) delay;
513
514 if (u->memchunk.memblock)
515 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
516
517 return r;
518 }
519
520 static int build_pollfd(struct userdata *u) {
521 pa_assert(u);
522 pa_assert(u->pcm_handle);
523
524 if (u->alsa_rtpoll_item)
525 pa_rtpoll_item_free(u->alsa_rtpoll_item);
526
527 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
528 return -1;
529
530 return 0;
531 }
532
533 static int suspend(struct userdata *u) {
534 pa_assert(u);
535 pa_assert(u->pcm_handle);
536
537 pa_smoother_pause(u->smoother, pa_rtclock_usec());
538
539 /* Let's suspend */
540 snd_pcm_drain(u->pcm_handle);
541 snd_pcm_close(u->pcm_handle);
542 u->pcm_handle = NULL;
543
544 if (u->alsa_rtpoll_item) {
545 pa_rtpoll_item_free(u->alsa_rtpoll_item);
546 u->alsa_rtpoll_item = NULL;
547 }
548
549 pa_log_info("Device suspended...");
550
551 return 0;
552 }
553
554 static int update_sw_params(struct userdata *u) {
555 snd_pcm_uframes_t avail_min;
556 int err;
557
558 pa_assert(u);
559
560 /* Use the full buffer if noone asked us for anything specific */
561 u->hwbuf_unused_frames = 0;
562
563 if (u->use_tsched) {
564 pa_usec_t latency;
565
566 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
567 size_t b;
568
569 pa_log_debug("latency set to %0.2f", (double) latency / PA_USEC_PER_MSEC);
570
571 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
572
573 /* We need at least one sample in our buffer */
574
575 if (PA_UNLIKELY(b < u->frame_size))
576 b = u->frame_size;
577
578 u->hwbuf_unused_frames =
579 PA_LIKELY(b < u->hwbuf_size) ?
580 ((u->hwbuf_size - b) / u->frame_size) : 0;
581
582 fix_tsched_watermark(u);
583 }
584 }
585
586 pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u->hwbuf_unused_frames);
587
588 /* We need at last one frame in the used part of the buffer */
589 avail_min = u->hwbuf_unused_frames + 1;
590
591 if (u->use_tsched) {
592 pa_usec_t sleep_usec, process_usec;
593
594 hw_sleep_time(u, &sleep_usec, &process_usec);
595 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec);
596 }
597
598 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
599
600 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
601 pa_log("Failed to set software parameters: %s", snd_strerror(err));
602 return err;
603 }
604
605 return 0;
606 }
607
/* Resume from suspend: reopen the PCM device and verify that we get
 * back exactly the configuration (access mode, sample spec, fragment
 * layout) we had before suspending. On any mismatch or error the handle
 * is closed again and -1 is returned; on success returns 0 and restarts
 * playback from scratch. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Drop cached ALSA configuration so device changes are picked up */
    snd_config_update_free_global();
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
        goto fail;
    }

    /* Ask for the same configuration we had before suspending */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
        goto fail;
    }

    /* The sink's format is fixed after creation, so any deviation from
     * the original configuration means the resume must fail */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings.");
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Treat this as a fresh start of playback */
    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
675
/* Sink message handler, runs in IO-thread context.
 * Handles latency queries and sink state transitions locally and
 * forwards everything (including handled codes) to the generic
 * pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no PCM handle; report 0 latency */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: install the pollfd */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* Coming back from suspend: reopen the device */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                    ;  /* nothing to do for these states */
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
729
730 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
731 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
732
733 pa_assert(u);
734 pa_assert(u->mixer_handle);
735
736 if (mask == SND_CTL_EVENT_MASK_REMOVE)
737 return 0;
738
739 if (mask & SND_CTL_EVENT_MASK_VALUE) {
740 pa_sink_get_volume(u->sink);
741 pa_sink_get_mute(u->sink);
742 }
743
744 return 0;
745 }
746
/* Read the current hardware playback volume into s->volume, channel by
 * channel. Prefers the dB API; on the first dB failure the dB path is
 * disabled permanently and the raw integer range is used instead.
 * Returns 0 on success, -1 on error. */
static int sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    int i;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    for (i = 0; i < s->sample_spec.channels; i++) {
        long alsa_vol;

        pa_assert(snd_mixer_selem_has_playback_channel(u->mixer_elem, u->mixer_map[i]));

        if (u->hw_dB_supported) {

            /* ALSA reports dB in 1/100 dB units */
            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) >= 0) {
                s->volume.values[i] = pa_sw_volume_from_dB(alsa_vol / 100.0);
                continue;
            }

            /* dB query failed: fall back to the raw range for good */
            u->hw_dB_supported = FALSE;
        }

        if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
            goto fail;

        /* Map [hw_volume_min, hw_volume_max] linearly onto [0, PA_VOLUME_NORM] */
        s->volume.values[i] = (pa_volume_t) roundf(((float) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));
    }

    return 0;

fail:
    pa_log_error("Unable to read volume: %s", snd_strerror(err));

    return -1;
}
783
/* Write s->volume to the hardware mixer, channel by channel, then read
 * the value back so s->volume reflects what the hardware actually
 * applied (hardware granularity may differ from what we asked for).
 * Prefers the dB API; on the first dB failure the dB path is disabled
 * permanently. Returns 0 on success, -1 on error. */
static int sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    int i;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    for (i = 0; i < s->sample_spec.channels; i++) {
        long alsa_vol;
        pa_volume_t vol;

        pa_assert(snd_mixer_selem_has_playback_channel(u->mixer_elem, u->mixer_map[i]));

        /* Hardware cannot amplify: cap at PA_VOLUME_NORM */
        vol = PA_MIN(s->volume.values[i], PA_VOLUME_NORM);

        if (u->hw_dB_supported) {
            /* Convert to ALSA's 1/100 dB units and clamp to the hw range */
            alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
            alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

            if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, -1)) >= 0) {

                /* Read back what the hardware actually applied */
                if (snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol) >= 0)
                    s->volume.values[i] = pa_sw_volume_from_dB(alsa_vol / 100.0);

                continue;
            }

            /* dB write failed: fall back to the raw range for good */
            u->hw_dB_supported = FALSE;

        }

        /* Map [0, PA_VOLUME_NORM] linearly onto the raw hw range */
        alsa_vol = (long) roundf(((float) vol * (u->hw_volume_max - u->hw_volume_min)) / PA_VOLUME_NORM) + u->hw_volume_min;
        alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);

        if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
            goto fail;

        /* Read back what the hardware actually applied */
        if (snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol) >= 0)
            s->volume.values[i] = (pa_volume_t) roundf(((float) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));
    }

    return 0;

fail:
    pa_log_error("Unable to set volume: %s", snd_strerror(err));

    return -1;
}
833
834 static int sink_get_mute_cb(pa_sink *s) {
835 struct userdata *u = s->userdata;
836 int err, sw;
837
838 pa_assert(u);
839 pa_assert(u->mixer_elem);
840
841 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
842 pa_log_error("Unable to get switch: %s", snd_strerror(err));
843 return -1;
844 }
845
846 s->muted = !sw;
847
848 return 0;
849 }
850
851 static int sink_set_mute_cb(pa_sink *s) {
852 struct userdata *u = s->userdata;
853 int err;
854
855 pa_assert(u);
856 pa_assert(u->mixer_elem);
857
858 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
859 pa_log_error("Unable to set switch: %s", snd_strerror(err));
860 return -1;
861 }
862
863 return 0;
864 }
865
866 static void sink_update_requested_latency_cb(pa_sink *s) {
867 struct userdata *u = s->userdata;
868 snd_pcm_sframes_t before;
869 pa_assert(u);
870
871 before = u->hwbuf_unused_frames;
872 update_sw_params(u);
873
874 /* Let's check whether we now use only a smaller part of the
875 buffer then before. If so, we need to make sure that subsequent
876 rewinds are relative to the new maxium fill level and not to the
877 current fill level. Thus, let's do a full rewind once, to clear
878 things up. */
879
880 if (u->hwbuf_unused_frames > before) {
881 pa_log_debug("Requesting rewind due to latency change.");
882 pa_sink_request_rewind(s, 0);
883 }
884 }
885
/* Execute a rewind requested via u->sink->thread_info.rewind_nbytes:
 * shrink the request to what the hardware can actually take back (we
 * never rewind into the watermark or the already-played part), call
 * snd_pcm_rewind(), and propagate the realized amount to the sink.
 * Returns 0 on success, -1 on an ALSA error. */
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;
    u->sink->thread_info.rewind_nbytes = 0;

    pa_assert(rewind_nbytes > 0);
    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    snd_pcm_hwsync(u->pcm_handle);
    if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
        pa_log("snd_pcm_avail_update() failed: %s", snd_strerror(unused));
        return -1;
    }

    /* Bytes that are not rewindable: the empty part of the buffer plus
     * the watermark we keep as safety margin */
    unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) rewind_nbytes / u->frame_size;
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", snd_strerror(out_frames));
            return -1;
        }
        pa_log_debug("after: %lu", (unsigned long) out_frames);

        /* The hardware may have rewound less than asked for */
        rewind_nbytes = out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            /* Keep our write counter in sync with the hw pointer */
            u->frame_index -= out_frames;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            /* Suppress the next underrun warning; see check_left_to_play() */
            u->after_rewind = TRUE;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    return 0;
}
943
/* Main loop of the IO thread: render data into the hw buffer, schedule
 * the next wakeup (timer-based in tsched mode, poll-descriptor-based
 * otherwise), and dispatch messages until shutdown. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

        /* pa_log_debug("loop"); */

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec;

            /* Handle any pending rewind before writing new data */
            if (u->sink->thread_info.rewind_nbytes > 0)
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec);
            else
                work_done = unix_write(u, &sleep_usec);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First write after (re)start: kick off playback and
                 * resume latency interpolation */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec());
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accomodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, 1)) < 0)
            goto fail;

        /* 0 means an orderly shutdown was requested */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            unsigned short revents = 0;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
                goto fail;
            }

            /* Device error: try to recover, then restart playback */
            if (revents & (POLLERR|POLLNVAL|POLLHUP)) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            }

            /* In tsched mode ALSA wakeups are unexpected (we rely on
             * our own timer), so log them */
            if (revents && u->use_tsched)
                pa_log_debug("Wakeup from ALSA! (%i)", revents);
        }
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1075
1076 int pa__init(pa_module*m) {
1077
1078 pa_modargs *ma = NULL;
1079 struct userdata *u = NULL;
1080 const char *dev_id;
1081 pa_sample_spec ss;
1082 pa_channel_map map;
1083 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1084 snd_pcm_uframes_t period_frames, tsched_frames;
1085 size_t frame_size;
1086 snd_pcm_info_t *pcm_info = NULL;
1087 int err;
1088 const char *name;
1089 char *name_buf = NULL;
1090 pa_bool_t namereg_fail;
1091 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, mixer_reset = TRUE;
1092 pa_usec_t usec;
1093 pa_sink_new_data data;
1094
1095 snd_pcm_info_alloca(&pcm_info);
1096
1097 pa_assert(m);
1098
1099 pa_alsa_redirect_errors_inc();
1100
1101 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1102 pa_log("Failed to parse module arguments");
1103 goto fail;
1104 }
1105
1106 ss = m->core->default_sample_spec;
1107 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1108 pa_log("Failed to parse sample specification and channel map");
1109 goto fail;
1110 }
1111
1112 frame_size = pa_frame_size(&ss);
1113
1114 nfrags = m->core->default_n_fragments;
1115 frag_size = pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1116 if (frag_size <= 0)
1117 frag_size = frame_size;
1118 tsched_size = pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1119 tsched_watermark = pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1120
1121 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1122 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1123 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1124 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1125 pa_log("Failed to parse buffer metrics");
1126 goto fail;
1127 }
1128
1129 hwbuf_size = frag_size * nfrags;
1130 period_frames = frag_size/frame_size;
1131 tsched_frames = tsched_size/frame_size;
1132
1133 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1134 pa_log("Failed to parse mmap argument.");
1135 goto fail;
1136 }
1137
1138 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1139 pa_log("Failed to parse timer_scheduling argument.");
1140 goto fail;
1141 }
1142
1143 if (use_tsched && !pa_rtclock_hrtimer()) {
1144 pa_log("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1145 use_tsched = FALSE;
1146 }
1147
1148 if (pa_modargs_get_value_boolean(ma, "mixer_reset", &mixer_reset) < 0) {
1149 pa_log("Failed to parse mixer_reset argument.");
1150 goto fail;
1151 }
1152
1153 u = pa_xnew0(struct userdata, 1);
1154 u->core = m->core;
1155 u->module = m;
1156 m->userdata = u;
1157 u->use_mmap = use_mmap;
1158 u->use_tsched = use_tsched;
1159 u->first = TRUE;
1160 u->since_start = 0;
1161 u->after_rewind = FALSE;
1162 u->rtpoll = pa_rtpoll_new();
1163 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1164 u->alsa_rtpoll_item = NULL;
1165
1166 u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE, 5);
1167 usec = pa_rtclock_usec();
1168 pa_smoother_set_time_offset(u->smoother, usec);
1169 pa_smoother_pause(u->smoother, usec);
1170
1171 snd_config_update_free_global();
1172
1173 b = use_mmap;
1174 d = use_tsched;
1175
1176 if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1177
1178 if (!(u->pcm_handle = pa_alsa_open_by_device_id(
1179 dev_id,
1180 &u->device_name,
1181 &ss, &map,
1182 SND_PCM_STREAM_PLAYBACK,
1183 &nfrags, &period_frames, tsched_frames,
1184 &b, &d)))
1185
1186 goto fail;
1187
1188 } else {
1189
1190 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1191 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1192 &u->device_name,
1193 &ss, &map,
1194 SND_PCM_STREAM_PLAYBACK,
1195 &nfrags, &period_frames, tsched_frames,
1196 &b, &d)))
1197 goto fail;
1198
1199 }
1200
1201 pa_assert(u->device_name);
1202 pa_log_info("Successfully opened device %s.", u->device_name);
1203
1204 if (use_mmap && !b) {
1205 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1206 u->use_mmap = use_mmap = FALSE;
1207 }
1208
1209 if (use_tsched && (!b || !d)) {
1210 pa_log_info("Cannot enabled timer-based scheduling, falling back to sound IRQ scheduling.");
1211 u->use_tsched = use_tsched = FALSE;
1212 }
1213
1214 if (u->use_mmap)
1215 pa_log_info("Successfully enabled mmap() mode.");
1216
1217 if (u->use_tsched)
1218 pa_log_info("Successfully enabled timer-based scheduling mode.");
1219
1220 if ((err = snd_pcm_info(u->pcm_handle, pcm_info)) < 0) {
1221 pa_log("Error fetching PCM info: %s", snd_strerror(err));
1222 goto fail;
1223 }
1224
1225 /* ALSA might tweak the sample spec, so recalculate the frame size */
1226 frame_size = pa_frame_size(&ss);
1227
1228 if ((err = snd_mixer_open(&u->mixer_handle, 0)) < 0)
1229 pa_log_warn("Error opening mixer: %s", snd_strerror(err));
1230 else {
1231 pa_bool_t found = FALSE;
1232
1233 if (pa_alsa_prepare_mixer(u->mixer_handle, u->device_name) >= 0)
1234 found = TRUE;
1235 else {
1236 snd_pcm_info_t *info;
1237
1238 snd_pcm_info_alloca(&info);
1239
1240 if (snd_pcm_info(u->pcm_handle, info) >= 0) {
1241 char *md;
1242 int card;
1243
1244 if ((card = snd_pcm_info_get_card(info)) >= 0) {
1245
1246 md = pa_sprintf_malloc("hw:%i", card);
1247
1248 if (strcmp(u->device_name, md))
1249 if (pa_alsa_prepare_mixer(u->mixer_handle, md) >= 0)
1250 found = TRUE;
1251 pa_xfree(md);
1252 }
1253 }
1254 }
1255
1256 if (found)
1257 if (!(u->mixer_elem = pa_alsa_find_elem(u->mixer_handle, "Master", "PCM")))
1258 found = FALSE;
1259
1260 if (!found) {
1261 snd_mixer_close(u->mixer_handle);
1262 u->mixer_handle = NULL;
1263 }
1264 }
1265
1266 if ((name = pa_modargs_get_value(ma, "sink_name", NULL)))
1267 namereg_fail = TRUE;
1268 else {
1269 name = name_buf = pa_sprintf_malloc("alsa_output.%s", u->device_name);
1270 namereg_fail = FALSE;
1271 }
1272
1273 pa_sink_new_data_init(&data);
1274 data.driver = __FILE__;
1275 data.module = m;
1276 pa_sink_new_data_set_name(&data, name);
1277 data.namereg_fail = namereg_fail;
1278 pa_sink_new_data_set_sample_spec(&data, &ss);
1279 pa_sink_new_data_set_channel_map(&data, &map);
1280
1281 pa_alsa_init_proplist(data.proplist, pcm_info);
1282 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1283 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1284 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1285 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1286
1287 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
1288 pa_sink_new_data_done(&data);
1289 pa_xfree(name_buf);
1290
1291 if (!u->sink) {
1292 pa_log("Failed to create sink object");
1293 goto fail;
1294 }
1295
1296 u->sink->parent.process_msg = sink_process_msg;
1297 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1298 u->sink->userdata = u;
1299
1300 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1301 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1302
1303 u->frame_size = frame_size;
1304 u->fragment_size = frag_size = period_frames * frame_size;
1305 u->nfragments = nfrags;
1306 u->hwbuf_size = u->fragment_size * nfrags;
1307 u->hwbuf_unused_frames = 0;
1308 u->tsched_watermark = tsched_watermark;
1309 u->frame_index = 0;
1310 u->hw_dB_supported = FALSE;
1311 u->hw_dB_min = u->hw_dB_max = 0;
1312 u->hw_volume_min = u->hw_volume_max = 0;
1313
1314 if (use_tsched)
1315 fix_tsched_watermark(u);
1316
1317 u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
1318 u->sink->max_latency = pa_bytes_to_usec(u->hwbuf_size, &ss);
1319 if (!use_tsched)
1320 u->sink->min_latency = u->sink->max_latency;
1321
1322 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1323 nfrags, (long unsigned) u->fragment_size,
1324 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1325
1326 if (use_tsched)
1327 pa_log_info("Time scheduling watermark is %0.2fms",
1328 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1329
1330 if (update_sw_params(u) < 0)
1331 goto fail;
1332
1333 pa_memchunk_reset(&u->memchunk);
1334
1335 if (u->mixer_handle) {
1336 pa_assert(u->mixer_elem);
1337
1338 if (snd_mixer_selem_has_playback_volume(u->mixer_elem))
1339
1340 if (pa_alsa_calc_mixer_map(u->mixer_elem, &map, u->mixer_map, TRUE) >= 0 &&
1341 snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) >= 0) {
1342
1343 pa_bool_t suitable = TRUE;
1344
1345 pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
1346
1347 if (u->hw_volume_min > u->hw_volume_max) {
1348
1349 pa_log_info("Minimal volume %li larger than maximum volume %li. Strange stuff Falling back to software volume control.", u->hw_volume_min, u->hw_volume_max);
1350 suitable = FALSE;
1351
1352 } else if (u->hw_volume_max - u->hw_volume_min < 3) {
1353
1354 pa_log_info("Device has less than 4 volume levels. Falling back to software volume control.");
1355 suitable = FALSE;
1356
1357 } else if (snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) >= 0) {
1358
1359 /* u->hw_dB_max = 0; u->hw_dB_min = -3000; Use this to make valgrind shut up */
1360
1361 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", u->hw_dB_min/100.0, u->hw_dB_max/100.0);
1362
1363 /* Let's see if this thing actually is useful for muting */
1364 if (u->hw_dB_min > -6000) {
1365 pa_log_info("Device cannot attenuate for more than -60 dB (only %0.2f dB supported), falling back to software volume control.", ((double) u->hw_dB_min) / 100);
1366
1367 suitable = FALSE;
1368 } else if (u->hw_dB_max < 0) {
1369
1370 pa_log_info("Device is still attenuated at maximum volume setting (%0.2f dB is maximum). Strange stuff. Falling back to software volume control.", ((double) u->hw_dB_max) / 100);
1371 suitable = FALSE;
1372
1373 } else if (u->hw_dB_min >= u->hw_dB_max) {
1374
1375 pa_log_info("Minimal dB (%0.2f) larger or equal to maximum dB (%0.2f). Strange stuff. Falling back to software volume control.", ((double) u->hw_dB_min) / 100, ((double) u->hw_dB_max) / 100);
1376 suitable = FALSE;
1377
1378 } else {
1379
1380 if (u->hw_dB_max > 0) {
1381 /* dB > 0 means overamplification, and clipping, we don't want that here */
1382 pa_log_info("Device can do overamplification for %0.2f dB. Limiting to 0 db", ((double) u->hw_dB_max) / 100);
1383 u->hw_dB_max = 0;
1384 }
1385
1386 u->hw_dB_supported = TRUE;
1387 }
1388 }
1389
1390 if (suitable) {
1391 u->sink->get_volume = sink_get_volume_cb;
1392 u->sink->set_volume = sink_set_volume_cb;
1393 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
1394 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");
1395
1396 } else if (mixer_reset) {
1397 pa_log_info("Using software volume control. Trying to reset sound card to 0 dB.");
1398 pa_alsa_0dB_playback(u->mixer_elem);
1399 } else
1400 pa_log_info("Using software volume control. Leaving hw mixer controls untouched.");
1401 }
1402
1403 if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
1404 u->sink->get_mute = sink_get_mute_cb;
1405 u->sink->set_mute = sink_set_mute_cb;
1406 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1407 }
1408
1409 u->mixer_fdl = pa_alsa_fdlist_new();
1410
1411 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, m->core->mainloop) < 0) {
1412 pa_log("Failed to initialize file descriptor monitoring");
1413 goto fail;
1414 }
1415
1416 snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
1417 snd_mixer_elem_set_callback_private(u->mixer_elem, u);
1418 } else
1419 u->mixer_fdl = NULL;
1420
1421 pa_alsa_dump(u->pcm_handle);
1422
1423 if (!(u->thread = pa_thread_new(thread_func, u))) {
1424 pa_log("Failed to create thread.");
1425 goto fail;
1426 }
1427
1428 /* Get initial mixer settings */
1429 if (data.volume_is_set) {
1430 if (u->sink->set_volume)
1431 u->sink->set_volume(u->sink);
1432 } else {
1433 if (u->sink->get_volume)
1434 u->sink->get_volume(u->sink);
1435 }
1436
1437 if (data.muted_is_set) {
1438 if (u->sink->set_mute)
1439 u->sink->set_mute(u->sink);
1440 } else {
1441 if (u->sink->get_mute)
1442 u->sink->get_mute(u->sink);
1443 }
1444
1445 pa_sink_put(u->sink);
1446
1447 pa_modargs_free(ma);
1448
1449 return 0;
1450
1451 fail:
1452
1453 if (ma)
1454 pa_modargs_free(ma);
1455
1456 pa__done(m);
1457
1458 return -1;
1459 }
1460
1461 void pa__done(pa_module*m) {
1462 struct userdata *u;
1463
1464 pa_assert(m);
1465
1466 if (!(u = m->userdata)) {
1467 pa_alsa_redirect_errors_dec();
1468 return;
1469 }
1470
1471 if (u->sink)
1472 pa_sink_unlink(u->sink);
1473
1474 if (u->thread) {
1475 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1476 pa_thread_free(u->thread);
1477 }
1478
1479 pa_thread_mq_done(&u->thread_mq);
1480
1481 if (u->sink)
1482 pa_sink_unref(u->sink);
1483
1484 if (u->memchunk.memblock)
1485 pa_memblock_unref(u->memchunk.memblock);
1486
1487 if (u->alsa_rtpoll_item)
1488 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1489
1490 if (u->rtpoll)
1491 pa_rtpoll_free(u->rtpoll);
1492
1493 if (u->mixer_fdl)
1494 pa_alsa_fdlist_free(u->mixer_fdl);
1495
1496 if (u->mixer_handle)
1497 snd_mixer_close(u->mixer_handle);
1498
1499 if (u->pcm_handle) {
1500 snd_pcm_drop(u->pcm_handle);
1501 snd_pcm_close(u->pcm_handle);
1502 }
1503
1504 if (u->smoother)
1505 pa_smoother_free(u->smoother);
1506
1507 pa_xfree(u->device_name);
1508 pa_xfree(u);
1509
1510 snd_config_update_free_global();
1511
1512 pa_alsa_redirect_errors_dec();
1513 }