]> code.delx.au - pulseaudio/blob - src/modules/module-alsa-sink.c
get rid of svn $ keywords
[pulseaudio] / src / modules / module-alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/xmalloc.h>
32 #include <pulse/util.h>
33 #include <pulse/timeval.h>
34
35 #include <pulsecore/core.h>
36 #include <pulsecore/module.h>
37 #include <pulsecore/memchunk.h>
38 #include <pulsecore/sink.h>
39 #include <pulsecore/modargs.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/log.h>
43 #include <pulsecore/macro.h>
44 #include <pulsecore/thread.h>
45 #include <pulsecore/core-error.h>
46 #include <pulsecore/thread-mq.h>
47 #include <pulsecore/rtpoll.h>
48 #include <pulsecore/rtclock.h>
49 #include <pulsecore/time-smoother.h>
50
51 #include "alsa-util.h"
52 #include "module-alsa-sink-symdef.h"
53
/* Module metadata and the argument summary shown by module introspection
 * tools; the argument list must stay in sync with valid_modargs[] below. */
PA_MODULE_AUTHOR("Lennart Poettering");
PA_MODULE_DESCRIPTION("ALSA Sink");
PA_MODULE_VERSION(PACKAGE_VERSION);
PA_MODULE_LOAD_ONCE(FALSE);
PA_MODULE_USAGE(
        "sink_name=<name for the sink> "
        "device=<ALSA device> "
        "device_id=<ALSA card index> "
        "format=<sample format> "
        "rate=<sample rate> "
        "channels=<number of channels> "
        "channel_map=<channel map> "
        "fragments=<number of fragments> "
        "fragment_size=<fragment size> "
        "mmap=<enable memory mapping?> "
        "tsched=<enable system timer based scheduling mode?> "
        "tsched_buffer_size=<buffer size when using timer based scheduling> "
        "tsched_buffer_watermark=<lower fill watermark> "
        "mixer_reset=<reset hw volume and mute settings to sane defaults when falling back to software?>");
73
/* Exhaustive list of module arguments accepted by pa__init();
 * NULL-terminated, and kept in sync with PA_MODULE_USAGE above. */
static const char* const valid_modargs[] = {
    "sink_name",
    "device",
    "device_id",
    "format",
    "rate",
    "channels",
    "channel_map",
    "fragments",
    "fragment_size",
    "mmap",
    "tsched",
    "tsched_buffer_size",
    "tsched_buffer_watermark",
    "mixer_reset",
    NULL
};
91
/* Defaults used when the corresponding module argument is absent. */
#define DEFAULT_DEVICE "default"                           /* ALSA device string */
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)     /* 2s */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
#define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC)         /* 3ms */
#define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC)        /* 3ms */
97
/* Per-instance state, owned by the module and shared with the IO thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;            /* IO thread running thread_func() */
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;        /* NULL while suspended */

    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    snd_mixer_elem_t *mixer_elem;
    long hw_volume_max, hw_volume_min;     /* raw mixer volume range */
    long hw_dB_max, hw_dB_min;             /* dB*100 range, if dB is usable */
    pa_bool_t hw_dB_supported;             /* cleared on first dB API failure */

    size_t frame_size, fragment_size, hwbuf_size, tsched_watermark;  /* all in bytes */
    unsigned nfragments;
    pa_memchunk memchunk;         /* partially written chunk (unix_write path) */

    char *device_name;

    pa_bool_t use_mmap, use_tsched;

    /* first: playback not yet started since (re)open; after_rewind:
     * suppresses the underrun warning right after a rewind */
    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    pa_smoother *smoother;        /* maps system time <-> sound card time */
    int64_t frame_index;          /* total frames handed to ALSA, minus rewinds */
    uint64_t since_start;         /* bytes written since last (re)start */

    snd_pcm_sframes_t hwbuf_unused_frames; /* part of the hw buffer we keep empty */
};
136
137 static void fix_tsched_watermark(struct userdata *u) {
138 size_t max_use;
139 size_t min_sleep, min_wakeup;
140 pa_assert(u);
141
142 max_use = u->hwbuf_size - u->hwbuf_unused_frames * u->frame_size;
143
144 min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
145 min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
146
147 if (min_sleep > max_use/2)
148 min_sleep = pa_frame_align(max_use/2, &u->sink->sample_spec);
149 if (min_sleep < u->frame_size)
150 min_sleep = u->frame_size;
151
152 if (min_wakeup > max_use/2)
153 min_wakeup = pa_frame_align(max_use/2, &u->sink->sample_spec);
154 if (min_wakeup < u->frame_size)
155 min_wakeup = u->frame_size;
156
157 if (u->tsched_watermark > max_use-min_sleep)
158 u->tsched_watermark = max_use-min_sleep;
159
160 if (u->tsched_watermark < min_wakeup)
161 u->tsched_watermark = min_wakeup;
162 }
163
164 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
165 pa_usec_t usec, wm;
166
167 pa_assert(sleep_usec);
168 pa_assert(process_usec);
169
170 pa_assert(u);
171
172 usec = pa_sink_get_requested_latency_within_thread(u->sink);
173
174 if (usec == (pa_usec_t) -1)
175 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
176
177 /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */
178
179 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
180
181 if (usec >= wm) {
182 *sleep_usec = usec - wm;
183 *process_usec = wm;
184 } else
185 *process_usec = *sleep_usec = usec / 2;
186
187 /* pa_log_debug("after watermark: %u ms", (unsigned) (*sleep_usec / PA_USEC_PER_MSEC)); */
188 }
189
190 static int try_recover(struct userdata *u, const char *call, int err) {
191 pa_assert(u);
192 pa_assert(call);
193 pa_assert(err < 0);
194
195 pa_log_debug("%s: %s", call, snd_strerror(err));
196
197 pa_assert(err != -EAGAIN);
198
199 if (err == -EPIPE)
200 pa_log_debug("%s: Buffer underrun!", call);
201
202 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0) {
203 u->first = TRUE;
204 u->since_start = 0;
205 return 0;
206 }
207
208 pa_log("%s: %s", call, snd_strerror(err));
209 return -1;
210 }
211
212 static size_t check_left_to_play(struct userdata *u, snd_pcm_sframes_t n) {
213 size_t left_to_play;
214
215 if (n*u->frame_size < u->hwbuf_size)
216 left_to_play = u->hwbuf_size - (n*u->frame_size);
217 else
218 left_to_play = 0;
219
220 if (left_to_play > 0) {
221 /* pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC); */
222 } else if (!u->first && !u->after_rewind) {
223 pa_log_info("Underrun!");
224
225 if (u->use_tsched) {
226 size_t old_watermark = u->tsched_watermark;
227
228 u->tsched_watermark *= 2;
229 fix_tsched_watermark(u);
230
231 if (old_watermark != u->tsched_watermark)
232 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
233 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
234 }
235 }
236
237 return left_to_play;
238 }
239
240 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec) {
241 int work_done = 0;
242 pa_usec_t max_sleep_usec, process_usec;
243 size_t left_to_play;
244
245 pa_assert(u);
246 pa_sink_assert_ref(u->sink);
247
248 if (u->use_tsched)
249 hw_sleep_time(u, &max_sleep_usec, &process_usec);
250
251 for (;;) {
252 snd_pcm_sframes_t n;
253 int r;
254
255 snd_pcm_hwsync(u->pcm_handle);
256
257 /* First we determine how many samples are missing to fill the
258 * buffer up to 100% */
259
260 if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {
261
262 if ((r = try_recover(u, "snd_pcm_avail_update", n)) == 0)
263 continue;
264
265 return r;
266 }
267
268 left_to_play = check_left_to_play(u, n);
269
270 if (u->use_tsched)
271
272 /* We won't fill up the playback buffer before at least
273 * half the sleep time is over because otherwise we might
274 * ask for more data from the clients then they expect. We
275 * need to guarantee that clients only have to keep around
276 * a single hw buffer length. */
277
278 if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
279 break;
280
281 if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
282 break;
283
284 n -= u->hwbuf_unused_frames;
285
286 /* pa_log_debug("Filling up"); */
287
288 for (;;) {
289 pa_memchunk chunk;
290 void *p;
291 int err;
292 const snd_pcm_channel_area_t *areas;
293 snd_pcm_uframes_t offset, frames = (snd_pcm_uframes_t) n;
294
295 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
296
297 if (PA_UNLIKELY((err = snd_pcm_mmap_begin(u->pcm_handle, &areas, &offset, &frames)) < 0)) {
298
299 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
300 continue;
301
302 return r;
303 }
304
305 /* Make sure that if these memblocks need to be copied they will fit into one slot */
306 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
307 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
308
309 /* Check these are multiples of 8 bit */
310 pa_assert((areas[0].first & 7) == 0);
311 pa_assert((areas[0].step & 7)== 0);
312
313 /* We assume a single interleaved memory buffer */
314 pa_assert((areas[0].first >> 3) == 0);
315 pa_assert((areas[0].step >> 3) == u->frame_size);
316
317 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
318
319 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
320 chunk.length = pa_memblock_get_length(chunk.memblock);
321 chunk.index = 0;
322
323 pa_sink_render_into_full(u->sink, &chunk);
324
325 /* FIXME: Maybe we can do something to keep this memory block
326 * a little bit longer around? */
327 pa_memblock_unref_fixed(chunk.memblock);
328
329 if (PA_UNLIKELY((err = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
330
331 if ((r = try_recover(u, "snd_pcm_mmap_commit", err)) == 0)
332 continue;
333
334 return r;
335 }
336
337 work_done = 1;
338
339 u->frame_index += frames;
340 u->since_start += frames * u->frame_size;
341
342 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
343
344 if (frames >= (snd_pcm_uframes_t) n)
345 break;
346
347 n -= frames;
348 }
349 }
350
351 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
352 return work_done;
353 }
354
355 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec) {
356 int work_done = 0;
357 pa_usec_t max_sleep_usec, process_usec;
358 size_t left_to_play;
359
360 pa_assert(u);
361 pa_sink_assert_ref(u->sink);
362
363 if (u->use_tsched)
364 hw_sleep_time(u, &max_sleep_usec, &process_usec);
365
366 for (;;) {
367 snd_pcm_sframes_t n;
368 int r;
369
370 snd_pcm_hwsync(u->pcm_handle);
371
372 if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {
373
374 if ((r = try_recover(u, "snd_pcm_avail_update", n)) == 0)
375 continue;
376
377 return r;
378 }
379
380 left_to_play = check_left_to_play(u, n);
381
382 if (u->use_tsched)
383
384 /* We won't fill up the playback buffer before at least
385 * half the sleep time is over because otherwise we might
386 * ask for more data from the clients then they expect. We
387 * need to guarantee that clients only have to keep around
388 * a single hw buffer length. */
389
390 if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
391 break;
392
393 if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
394 break;
395
396 n -= u->hwbuf_unused_frames;
397
398 for (;;) {
399 snd_pcm_sframes_t frames;
400 void *p;
401
402 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
403
404 if (u->memchunk.length <= 0)
405 pa_sink_render(u->sink, n * u->frame_size, &u->memchunk);
406
407 pa_assert(u->memchunk.length > 0);
408
409 frames = u->memchunk.length / u->frame_size;
410
411 if (frames > n)
412 frames = n;
413
414 p = pa_memblock_acquire(u->memchunk.memblock);
415 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, frames);
416 pa_memblock_release(u->memchunk.memblock);
417
418 pa_assert(frames != 0);
419
420 if (PA_UNLIKELY(frames < 0)) {
421
422 if ((r = try_recover(u, "snd_pcm_writei", n)) == 0)
423 continue;
424
425 return r;
426 }
427
428 u->memchunk.index += frames * u->frame_size;
429 u->memchunk.length -= frames * u->frame_size;
430
431 if (u->memchunk.length <= 0) {
432 pa_memblock_unref(u->memchunk.memblock);
433 pa_memchunk_reset(&u->memchunk);
434 }
435
436 work_done = 1;
437
438 u->frame_index += frames;
439 u->since_start += frames * u->frame_size;
440
441 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
442
443 if (frames >= n)
444 break;
445
446 n -= frames;
447 }
448 }
449
450 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
451 return work_done;
452 }
453
/* Feed the time smoother a fresh (system time, sound card time) sample so
 * that sink_get_latency() can interpolate between wakeups. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t frames;
    int err;
    pa_usec_t now1, now2;
    /* struct timeval timestamp; */
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    snd_pcm_hwsync(u->pcm_handle);
    snd_pcm_avail_update(u->pcm_handle);

    /* Alternative snd_pcm_status()-based implementation, kept for reference: */
    /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
    /*     pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
    /*     return; */
    /* } */

    /* delay = snd_pcm_status_get_delay(status); */

    if (PA_UNLIKELY((err = snd_pcm_delay(u->pcm_handle, &delay)) < 0)) {
        pa_log("Failed to query DSP status data: %s", snd_strerror(err));
        return;
    }

    /* Frames audible right now: everything written so far minus what is
     * still queued in the hardware buffer. */
    frames = u->frame_index - delay;

    /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */

    /* snd_pcm_status_get_tstamp(status, &timestamp); */
    /* pa_rtclock_from_wallclock(&timestamp); */
    /* now1 = pa_timeval_load(&timestamp); */

    now1 = pa_rtclock_usec();
    now2 = pa_bytes_to_usec(frames * u->frame_size, &u->sink->sample_spec);
    pa_smoother_put(u->smoother, now1, now2);
}
496
497 static pa_usec_t sink_get_latency(struct userdata *u) {
498 pa_usec_t r = 0;
499 int64_t delay;
500 pa_usec_t now1, now2;
501
502 pa_assert(u);
503
504 now1 = pa_rtclock_usec();
505 now2 = pa_smoother_get(u->smoother, now1);
506
507 delay = (int64_t) pa_bytes_to_usec(u->frame_index * u->frame_size, &u->sink->sample_spec) - (int64_t) now2;
508
509 if (delay > 0)
510 r = (pa_usec_t) delay;
511
512 if (u->memchunk.memblock)
513 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
514
515 return r;
516 }
517
518 static int build_pollfd(struct userdata *u) {
519 pa_assert(u);
520 pa_assert(u->pcm_handle);
521
522 if (u->alsa_rtpoll_item)
523 pa_rtpoll_item_free(u->alsa_rtpoll_item);
524
525 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
526 return -1;
527
528 return 0;
529 }
530
531 static int suspend(struct userdata *u) {
532 pa_assert(u);
533 pa_assert(u->pcm_handle);
534
535 pa_smoother_pause(u->smoother, pa_rtclock_usec());
536
537 /* Let's suspend */
538 snd_pcm_drain(u->pcm_handle);
539 snd_pcm_close(u->pcm_handle);
540 u->pcm_handle = NULL;
541
542 if (u->alsa_rtpoll_item) {
543 pa_rtpoll_item_free(u->alsa_rtpoll_item);
544 u->alsa_rtpoll_item = NULL;
545 }
546
547 pa_log_info("Device suspended...");
548
549 return 0;
550 }
551
552 static int update_sw_params(struct userdata *u) {
553 snd_pcm_uframes_t avail_min;
554 int err;
555
556 pa_assert(u);
557
558 /* Use the full buffer if noone asked us for anything specific */
559 u->hwbuf_unused_frames = 0;
560
561 if (u->use_tsched) {
562 pa_usec_t latency;
563
564 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
565 size_t b;
566
567 pa_log_debug("latency set to %0.2f", (double) latency / PA_USEC_PER_MSEC);
568
569 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
570
571 /* We need at least one sample in our buffer */
572
573 if (PA_UNLIKELY(b < u->frame_size))
574 b = u->frame_size;
575
576 u->hwbuf_unused_frames =
577 PA_LIKELY(b < u->hwbuf_size) ?
578 ((u->hwbuf_size - b) / u->frame_size) : 0;
579
580 fix_tsched_watermark(u);
581 }
582 }
583
584 pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u->hwbuf_unused_frames);
585
586 /* We need at last one frame in the used part of the buffer */
587 avail_min = u->hwbuf_unused_frames + 1;
588
589 if (u->use_tsched) {
590 pa_usec_t sleep_usec, process_usec;
591
592 hw_sleep_time(u, &sleep_usec, &process_usec);
593 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec);
594 }
595
596 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
597
598 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
599 pa_log("Failed to set software parameters: %s", snd_strerror(err));
600 return err;
601 }
602
603 return 0;
604 }
605
/* Reopen the PCM device with the exact parameters it had before suspend().
 * Fails (returns -1, pcm_handle left NULL) if the original access mode,
 * sample spec or fragment settings cannot be restored. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Drop cached ALSA configuration so device changes are picked up */
    snd_config_update_free_global();
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
        goto fail;
    }

    /* Ask for exactly what we had before */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
        goto fail;
    }

    /* pa_alsa_set_hw_params() may have negotiated something else; the sink
     * is already wired up for the old settings, so any mismatch is fatal. */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings.");
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Start from scratch, like after a fresh open */
    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
673
/* Message handler running in the IO thread: answers latency queries and
 * performs the suspend/resume side effects of state changes before
 * delegating to the generic pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no device, so latency is 0 */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    /* Suspending only makes sense from an opened state */
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: set up polling */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* Coming back from suspend: reopen the device */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                    /* Nothing to do for these targets */
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
727
728 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
729 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
730
731 pa_assert(u);
732 pa_assert(u->mixer_handle);
733
734 if (mask == SND_CTL_EVENT_MASK_REMOVE)
735 return 0;
736
737 if (mask & SND_CTL_EVENT_MASK_VALUE) {
738 pa_sink_get_volume(u->sink);
739 pa_sink_get_mute(u->sink);
740 }
741
742 return 0;
743 }
744
/* Read the hardware volume into s->volume, one channel at a time.
 * Prefers the dB API; permanently falls back to raw volume scaling on the
 * first dB failure (hw_dB_supported is cleared, never re-set). */
static int sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    int i;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    for (i = 0; i < s->sample_spec.channels; i++) {
        long alsa_vol;

        pa_assert(snd_mixer_selem_has_playback_channel(u->mixer_elem, u->mixer_map[i]));

        if (u->hw_dB_supported) {

            /* ALSA reports dB in units of 1/100 dB */
            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) >= 0) {
                s->volume.values[i] = pa_sw_volume_from_dB(alsa_vol / 100.0);
                continue;
            }

            u->hw_dB_supported = FALSE;
        }

        /* Raw fallback: linearly map [hw_volume_min, hw_volume_max] onto
         * [0, PA_VOLUME_NORM] */
        if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
            goto fail;

        s->volume.values[i] = (pa_volume_t) roundf(((float) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));
    }

    return 0;

fail:
    pa_log_error("Unable to read volume: %s", snd_strerror(err));

    return -1;
}
781
/* Write s->volume to the hardware, then read it back so s->volume reflects
 * what the hardware actually granted (mixer steps are usually coarser than
 * PA volumes). Same dB-first/raw-fallback scheme as sink_get_volume_cb(). */
static int sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    int i;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    for (i = 0; i < s->sample_spec.channels; i++) {
        long alsa_vol;
        pa_volume_t vol;

        pa_assert(snd_mixer_selem_has_playback_channel(u->mixer_elem, u->mixer_map[i]));

        /* Hardware cannot amplify beyond its own maximum */
        vol = PA_MIN(s->volume.values[i], PA_VOLUME_NORM);

        if (u->hw_dB_supported) {
            /* Convert PA volume to 1/100 dB and clamp to the hw dB range */
            alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
            alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

            if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, -1)) >= 0) {

                /* Read back the value actually applied */
                if (snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol) >= 0)
                    s->volume.values[i] = pa_sw_volume_from_dB(alsa_vol / 100.0);

                continue;
            }

            u->hw_dB_supported = FALSE;

        }

        /* Raw fallback: linear mapping onto the hw volume range */
        alsa_vol = (long) roundf(((float) vol * (u->hw_volume_max - u->hw_volume_min)) / PA_VOLUME_NORM) + u->hw_volume_min;
        alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);

        if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
            goto fail;

        if (snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol) >= 0)
            s->volume.values[i] = (pa_volume_t) roundf(((float) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));
    }

    return 0;

fail:
    pa_log_error("Unable to set volume: %s", snd_strerror(err));

    return -1;
}
831
832 static int sink_get_mute_cb(pa_sink *s) {
833 struct userdata *u = s->userdata;
834 int err, sw;
835
836 pa_assert(u);
837 pa_assert(u->mixer_elem);
838
839 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
840 pa_log_error("Unable to get switch: %s", snd_strerror(err));
841 return -1;
842 }
843
844 s->muted = !sw;
845
846 return 0;
847 }
848
849 static int sink_set_mute_cb(pa_sink *s) {
850 struct userdata *u = s->userdata;
851 int err;
852
853 pa_assert(u);
854 pa_assert(u->mixer_elem);
855
856 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
857 pa_log_error("Unable to set switch: %s", snd_strerror(err));
858 return -1;
859 }
860
861 return 0;
862 }
863
864 static void sink_update_requested_latency_cb(pa_sink *s) {
865 struct userdata *u = s->userdata;
866 snd_pcm_sframes_t before;
867 pa_assert(u);
868
869 if (!u->pcm_handle)
870 return;
871
872 before = u->hwbuf_unused_frames;
873 update_sw_params(u);
874
875 /* Let's check whether we now use only a smaller part of the
876 buffer then before. If so, we need to make sure that subsequent
877 rewinds are relative to the new maxium fill level and not to the
878 current fill level. Thus, let's do a full rewind once, to clear
879 things up. */
880
881 if (u->hwbuf_unused_frames > before) {
882 pa_log_debug("Requesting rewind due to latency change.");
883 pa_sink_request_rewind(s, 0);
884 }
885 }
886
/* Handle a rewind request from the sink core: take back as much already
 * written audio from the hardware buffer as safely possible, then tell
 * the core how much was actually rewound. Returns 0 on success, -1 on
 * ALSA failure. */
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;
    u->sink->thread_info.rewind_nbytes = 0;

    pa_assert(rewind_nbytes > 0);
    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    snd_pcm_hwsync(u->pcm_handle);
    if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
        pa_log("snd_pcm_avail_update() failed: %s", snd_strerror(unused));
        return -1;
    }

    /* Keep the watermark's worth of audio (plus what is already free)
     * untouched so playback does not underrun right after the rewind */
    unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) rewind_nbytes / u->frame_size;
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", snd_strerror(out_frames));
            return -1;
        }
        pa_log_debug("after: %lu", (unsigned long) out_frames);

        /* The device may have rewound fewer frames than requested */
        rewind_nbytes = out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->frame_index -= out_frames;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            /* Suppress the underrun warning on the next fill check */
            u->after_rewind = TRUE;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    return 0;
}
944
/* IO thread main loop: render audio into the device, schedule wakeups
 * (timer-based or poll-based), and react to ALSA poll events. Runs until
 * pa_rtpoll_run() signals shutdown or a fatal error occurs. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

        /* pa_log_debug("loop"); */

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec;

            /* Honor any pending rewind request before writing new data */
            if (u->sink->thread_info.rewind_nbytes > 0)
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec);
            else
                work_done = unix_write(u, &sleep_usec);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First write after (re)open: actually start the stream */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec());
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accomodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    /*pa_log_debug("Cutting sleep time for the initial iterations by half.");*/
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, 1)) < 0)
            goto fail;

        /* pa_rtpoll_run() returning 0 means orderly shutdown */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            unsigned short revents = 0;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
                goto fail;
            }

            /* Error/hangup on the device: try to recover and restart */
            if (revents & (POLLERR|POLLNVAL|POLLHUP)) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            }

            /* In tsched mode the timer should wake us, not the fds */
            if (revents && u->use_tsched)
                pa_log_debug("Wakeup from ALSA! (%i)", revents);
        }
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1076
1077 int pa__init(pa_module*m) {
1078
1079 pa_modargs *ma = NULL;
1080 struct userdata *u = NULL;
1081 const char *dev_id;
1082 pa_sample_spec ss;
1083 pa_channel_map map;
1084 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1085 snd_pcm_uframes_t period_frames, tsched_frames;
1086 size_t frame_size;
1087 snd_pcm_info_t *pcm_info = NULL;
1088 int err;
1089 const char *name;
1090 char *name_buf = NULL;
1091 pa_bool_t namereg_fail;
1092 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, mixer_reset = TRUE;
1093 pa_usec_t usec;
1094 pa_sink_new_data data;
1095
1096 snd_pcm_info_alloca(&pcm_info);
1097
1098 pa_assert(m);
1099
1100 pa_alsa_redirect_errors_inc();
1101
1102 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1103 pa_log("Failed to parse module arguments");
1104 goto fail;
1105 }
1106
1107 ss = m->core->default_sample_spec;
1108 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1109 pa_log("Failed to parse sample specification and channel map");
1110 goto fail;
1111 }
1112
1113 frame_size = pa_frame_size(&ss);
1114
1115 nfrags = m->core->default_n_fragments;
1116 frag_size = pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1117 if (frag_size <= 0)
1118 frag_size = frame_size;
1119 tsched_size = pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1120 tsched_watermark = pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1121
1122 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1123 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1124 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1125 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1126 pa_log("Failed to parse buffer metrics");
1127 goto fail;
1128 }
1129
1130 hwbuf_size = frag_size * nfrags;
1131 period_frames = frag_size/frame_size;
1132 tsched_frames = tsched_size/frame_size;
1133
1134 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1135 pa_log("Failed to parse mmap argument.");
1136 goto fail;
1137 }
1138
1139 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1140 pa_log("Failed to parse timer_scheduling argument.");
1141 goto fail;
1142 }
1143
1144 if (use_tsched && !pa_rtclock_hrtimer()) {
1145 pa_log("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1146 use_tsched = FALSE;
1147 }
1148
1149 if (pa_modargs_get_value_boolean(ma, "mixer_reset", &mixer_reset) < 0) {
1150 pa_log("Failed to parse mixer_reset argument.");
1151 goto fail;
1152 }
1153
1154 u = pa_xnew0(struct userdata, 1);
1155 u->core = m->core;
1156 u->module = m;
1157 m->userdata = u;
1158 u->use_mmap = use_mmap;
1159 u->use_tsched = use_tsched;
1160 u->first = TRUE;
1161 u->since_start = 0;
1162 u->after_rewind = FALSE;
1163 u->rtpoll = pa_rtpoll_new();
1164 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1165 u->alsa_rtpoll_item = NULL;
1166
1167 u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE, 5);
1168 usec = pa_rtclock_usec();
1169 pa_smoother_set_time_offset(u->smoother, usec);
1170 pa_smoother_pause(u->smoother, usec);
1171
1172 snd_config_update_free_global();
1173
1174 b = use_mmap;
1175 d = use_tsched;
1176
1177 if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1178
1179 if (!(u->pcm_handle = pa_alsa_open_by_device_id(
1180 dev_id,
1181 &u->device_name,
1182 &ss, &map,
1183 SND_PCM_STREAM_PLAYBACK,
1184 &nfrags, &period_frames, tsched_frames,
1185 &b, &d)))
1186
1187 goto fail;
1188
1189 } else {
1190
1191 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1192 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1193 &u->device_name,
1194 &ss, &map,
1195 SND_PCM_STREAM_PLAYBACK,
1196 &nfrags, &period_frames, tsched_frames,
1197 &b, &d)))
1198 goto fail;
1199
1200 }
1201
1202 pa_assert(u->device_name);
1203 pa_log_info("Successfully opened device %s.", u->device_name);
1204
1205 if (use_mmap && !b) {
1206 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1207 u->use_mmap = use_mmap = FALSE;
1208 }
1209
1210 if (use_tsched && (!b || !d)) {
1211 pa_log_info("Cannot enabled timer-based scheduling, falling back to sound IRQ scheduling.");
1212 u->use_tsched = use_tsched = FALSE;
1213 }
1214
1215 if (u->use_mmap)
1216 pa_log_info("Successfully enabled mmap() mode.");
1217
1218 if (u->use_tsched)
1219 pa_log_info("Successfully enabled timer-based scheduling mode.");
1220
1221 if ((err = snd_pcm_info(u->pcm_handle, pcm_info)) < 0) {
1222 pa_log("Error fetching PCM info: %s", snd_strerror(err));
1223 goto fail;
1224 }
1225
1226 /* ALSA might tweak the sample spec, so recalculate the frame size */
1227 frame_size = pa_frame_size(&ss);
1228
1229 if ((err = snd_mixer_open(&u->mixer_handle, 0)) < 0)
1230 pa_log_warn("Error opening mixer: %s", snd_strerror(err));
1231 else {
1232 pa_bool_t found = FALSE;
1233
1234 if (pa_alsa_prepare_mixer(u->mixer_handle, u->device_name) >= 0)
1235 found = TRUE;
1236 else {
1237 snd_pcm_info_t *info;
1238
1239 snd_pcm_info_alloca(&info);
1240
1241 if (snd_pcm_info(u->pcm_handle, info) >= 0) {
1242 char *md;
1243 int card;
1244
1245 if ((card = snd_pcm_info_get_card(info)) >= 0) {
1246
1247 md = pa_sprintf_malloc("hw:%i", card);
1248
1249 if (strcmp(u->device_name, md))
1250 if (pa_alsa_prepare_mixer(u->mixer_handle, md) >= 0)
1251 found = TRUE;
1252 pa_xfree(md);
1253 }
1254 }
1255 }
1256
1257 if (found)
1258 if (!(u->mixer_elem = pa_alsa_find_elem(u->mixer_handle, "Master", "PCM")))
1259 found = FALSE;
1260
1261 if (!found) {
1262 snd_mixer_close(u->mixer_handle);
1263 u->mixer_handle = NULL;
1264 }
1265 }
1266
1267 if ((name = pa_modargs_get_value(ma, "sink_name", NULL)))
1268 namereg_fail = TRUE;
1269 else {
1270 name = name_buf = pa_sprintf_malloc("alsa_output.%s", u->device_name);
1271 namereg_fail = FALSE;
1272 }
1273
1274 pa_sink_new_data_init(&data);
1275 data.driver = __FILE__;
1276 data.module = m;
1277 pa_sink_new_data_set_name(&data, name);
1278 data.namereg_fail = namereg_fail;
1279 pa_sink_new_data_set_sample_spec(&data, &ss);
1280 pa_sink_new_data_set_channel_map(&data, &map);
1281
1282 pa_alsa_init_proplist(data.proplist, pcm_info);
1283 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1284 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1285 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1286 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1287
1288 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
1289 pa_sink_new_data_done(&data);
1290 pa_xfree(name_buf);
1291
1292 if (!u->sink) {
1293 pa_log("Failed to create sink object");
1294 goto fail;
1295 }
1296
1297 u->sink->parent.process_msg = sink_process_msg;
1298 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1299 u->sink->userdata = u;
1300
1301 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1302 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1303
1304 u->frame_size = frame_size;
1305 u->fragment_size = frag_size = period_frames * frame_size;
1306 u->nfragments = nfrags;
1307 u->hwbuf_size = u->fragment_size * nfrags;
1308 u->hwbuf_unused_frames = 0;
1309 u->tsched_watermark = tsched_watermark;
1310 u->frame_index = 0;
1311 u->hw_dB_supported = FALSE;
1312 u->hw_dB_min = u->hw_dB_max = 0;
1313 u->hw_volume_min = u->hw_volume_max = 0;
1314
1315 if (use_tsched)
1316 fix_tsched_watermark(u);
1317
1318 u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
1319
1320 pa_sink_set_latency_range(u->sink,
1321 !use_tsched ? pa_bytes_to_usec(u->hwbuf_size, &ss) : (pa_usec_t) -1,
1322 pa_bytes_to_usec(u->hwbuf_size, &ss));
1323
1324 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1325 nfrags, (long unsigned) u->fragment_size,
1326 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1327
1328 if (use_tsched)
1329 pa_log_info("Time scheduling watermark is %0.2fms",
1330 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1331
1332 if (update_sw_params(u) < 0)
1333 goto fail;
1334
1335 pa_memchunk_reset(&u->memchunk);
1336
1337 if (u->mixer_handle) {
1338 pa_assert(u->mixer_elem);
1339
1340 if (snd_mixer_selem_has_playback_volume(u->mixer_elem))
1341
1342 if (pa_alsa_calc_mixer_map(u->mixer_elem, &map, u->mixer_map, TRUE) >= 0 &&
1343 snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) >= 0) {
1344
1345 pa_bool_t suitable = TRUE;
1346
1347 pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
1348
1349 if (u->hw_volume_min > u->hw_volume_max) {
1350
1351 pa_log_info("Minimal volume %li larger than maximum volume %li. Strange stuff Falling back to software volume control.", u->hw_volume_min, u->hw_volume_max);
1352 suitable = FALSE;
1353
1354 } else if (u->hw_volume_max - u->hw_volume_min < 3) {
1355
1356 pa_log_info("Device has less than 4 volume levels. Falling back to software volume control.");
1357 suitable = FALSE;
1358
1359 } else if (snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) >= 0) {
1360
1361 /* u->hw_dB_max = 0; u->hw_dB_min = -3000; Use this to make valgrind shut up */
1362
1363 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", u->hw_dB_min/100.0, u->hw_dB_max/100.0);
1364
1365 /* Let's see if this thing actually is useful for muting */
1366 if (u->hw_dB_min > -6000) {
1367 pa_log_info("Device cannot attenuate for more than -60 dB (only %0.2f dB supported), falling back to software volume control.", ((double) u->hw_dB_min) / 100);
1368
1369 suitable = FALSE;
1370 } else if (u->hw_dB_max < 0) {
1371
1372 pa_log_info("Device is still attenuated at maximum volume setting (%0.2f dB is maximum). Strange stuff. Falling back to software volume control.", ((double) u->hw_dB_max) / 100);
1373 suitable = FALSE;
1374
1375 } else if (u->hw_dB_min >= u->hw_dB_max) {
1376
1377 pa_log_info("Minimal dB (%0.2f) larger or equal to maximum dB (%0.2f). Strange stuff. Falling back to software volume control.", ((double) u->hw_dB_min) / 100, ((double) u->hw_dB_max) / 100);
1378 suitable = FALSE;
1379
1380 } else {
1381
1382 if (u->hw_dB_max > 0) {
1383 /* dB > 0 means overamplification, and clipping, we don't want that here */
1384 pa_log_info("Device can do overamplification for %0.2f dB. Limiting to 0 db", ((double) u->hw_dB_max) / 100);
1385 u->hw_dB_max = 0;
1386 }
1387
1388 u->hw_dB_supported = TRUE;
1389 }
1390 }
1391
1392 if (suitable) {
1393 u->sink->get_volume = sink_get_volume_cb;
1394 u->sink->set_volume = sink_set_volume_cb;
1395 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
1396 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");
1397
1398 } else if (mixer_reset) {
1399 pa_log_info("Using software volume control. Trying to reset sound card to 0 dB.");
1400 pa_alsa_0dB_playback(u->mixer_elem);
1401 } else
1402 pa_log_info("Using software volume control. Leaving hw mixer controls untouched.");
1403 }
1404
1405 if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
1406 u->sink->get_mute = sink_get_mute_cb;
1407 u->sink->set_mute = sink_set_mute_cb;
1408 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1409 }
1410
1411 u->mixer_fdl = pa_alsa_fdlist_new();
1412
1413 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, m->core->mainloop) < 0) {
1414 pa_log("Failed to initialize file descriptor monitoring");
1415 goto fail;
1416 }
1417
1418 snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
1419 snd_mixer_elem_set_callback_private(u->mixer_elem, u);
1420 } else
1421 u->mixer_fdl = NULL;
1422
1423 pa_alsa_dump(u->pcm_handle);
1424
1425 if (!(u->thread = pa_thread_new(thread_func, u))) {
1426 pa_log("Failed to create thread.");
1427 goto fail;
1428 }
1429
1430 /* Get initial mixer settings */
1431 if (data.volume_is_set) {
1432 if (u->sink->set_volume)
1433 u->sink->set_volume(u->sink);
1434 } else {
1435 if (u->sink->get_volume)
1436 u->sink->get_volume(u->sink);
1437 }
1438
1439 if (data.muted_is_set) {
1440 if (u->sink->set_mute)
1441 u->sink->set_mute(u->sink);
1442 } else {
1443 if (u->sink->get_mute)
1444 u->sink->get_mute(u->sink);
1445 }
1446
1447 pa_sink_put(u->sink);
1448
1449 pa_modargs_free(ma);
1450
1451 return 0;
1452
1453 fail:
1454
1455 if (ma)
1456 pa_modargs_free(ma);
1457
1458 pa__done(m);
1459
1460 return -1;
1461 }
1462
/* Module teardown: shut down the I/O thread, then release resources
 * roughly in reverse order of acquisition. Also called from
 * pa__init()'s failure path, so every field access below must cope
 * with a partially initialized userdata (hence the NULL checks). */
void pa__done(pa_module*m) {
    struct userdata *u;

    pa_assert(m);

    if (!(u = m->userdata)) {
        /* pa__init() failed before allocating userdata; just drop the
         * error-redirection reference it took. */
        pa_alsa_redirect_errors_dec();
        return;
    }

    /* Detach the sink from the core first so no new clients attach
     * while we shut the thread down. */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Ask the I/O thread to exit and wait for it; must happen before
     * freeing anything the thread still touches (rtpoll, pcm handle). */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    /* Drop any pending playback data. */
    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    /* The rtpoll item must go before the rtpoll it is registered with. */
    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* Stop watching mixer fds before closing the mixer itself. */
    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->pcm_handle) {
        /* Drop queued samples immediately instead of draining them. */
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->smoother)
        pa_smoother_free(u->smoother);

    pa_xfree(u->device_name);
    pa_xfree(u);

    /* Release cached ALSA global configuration. */
    snd_config_update_free_global();

    pa_alsa_redirect_errors_dec();
}