1 /* $Id$ */
2
3 /***
4 This file is part of PulseAudio.
5
6 Copyright 2004-2006 Lennart Poettering
7 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
8
9 PulseAudio is free software; you can redistribute it and/or modify
10 it under the terms of the GNU Lesser General Public License as published
11 by the Free Software Foundation; either version 2 of the License,
12 or (at your option) any later version.
13
14 PulseAudio is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
18
19 You should have received a copy of the GNU Lesser General Public License
20 along with PulseAudio; if not, write to the Free Software
21 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 USA.
23 ***/
24
25 #ifdef HAVE_CONFIG_H
26 #include <config.h>
27 #endif
28
29 #include <stdio.h>
30
31 #include <asoundlib.h>
32
33 #include <pulse/xmalloc.h>
34 #include <pulse/util.h>
35 #include <pulse/timeval.h>
36
37 #include <pulsecore/core.h>
38 #include <pulsecore/module.h>
39 #include <pulsecore/memchunk.h>
40 #include <pulsecore/sink.h>
41 #include <pulsecore/modargs.h>
42 #include <pulsecore/core-util.h>
43 #include <pulsecore/sample-util.h>
44 #include <pulsecore/log.h>
45 #include <pulsecore/macro.h>
46 #include <pulsecore/thread.h>
47 #include <pulsecore/core-error.h>
48 #include <pulsecore/thread-mq.h>
49 #include <pulsecore/rtpoll.h>
50 #include <pulsecore/rtclock.h>
51 #include <pulsecore/time-smoother.h>
52
53 #include "alsa-util.h"
54 #include "module-alsa-sink-symdef.h"
55
56 PA_MODULE_AUTHOR("Lennart Poettering");
57 PA_MODULE_DESCRIPTION("ALSA Sink");
58 PA_MODULE_VERSION(PACKAGE_VERSION);
59 PA_MODULE_LOAD_ONCE(FALSE);
60 PA_MODULE_USAGE(
61 "sink_name=<name for the sink> "
62 "device=<ALSA device> "
63 "device_id=<ALSA card index> "
64 "format=<sample format> "
65 "rate=<sample rate> "
66 "channels=<number of channels> "
67 "channel_map=<channel map> "
68 "fragments=<number of fragments> "
69 "fragment_size=<fragment size> "
70 "mmap=<enable memory mapping?> "
71 "tsched=<enable system timer based scheduling mode?> "
72 "tsched_buffer_size=<buffer size when using timer based scheduling> "
73 "tsched_buffer_watermark=<lower fill watermark> "
74 "mixer_reset=<reset hw volume and mute settings to sane defaults when falling back to software?>");
75
76 static const char* const valid_modargs[] = {
77 "sink_name",
78 "device",
79 "device_id",
80 "format",
81 "rate",
82 "channels",
83 "channel_map",
84 "fragments",
85 "fragment_size",
86 "mmap",
87 "tsched",
88 "tsched_buffer_size",
89 "tsched_buffer_watermark",
90 "mixer_reset",
91 NULL
92 };
93
94 #define DEFAULT_DEVICE "default"
95 #define DEFAULT_TSCHED_BUFFER_USEC (5*PA_USEC_PER_SEC) /* 5s */
96 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
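/* Defaults for timer-based scheduling: keep the hardware buffer large and
 * wake up via the system timer a watermark's worth of time before the data
 * we are responsible for would run out. */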
97
98 struct userdata {
99 pa_core *core;
100 pa_module *module;
101 pa_sink *sink;
102
103 pa_thread *thread;
104 pa_thread_mq thread_mq;
105 pa_rtpoll *rtpoll;
106
107 snd_pcm_t *pcm_handle;
108
109 pa_alsa_fdlist *mixer_fdl;
110 snd_mixer_t *mixer_handle;
111 snd_mixer_elem_t *mixer_elem;
112 long hw_volume_max, hw_volume_min;
113 long hw_dB_max, hw_dB_min;
114 pa_bool_t hw_dB_supported;
115
116 size_t frame_size, fragment_size, hwbuf_size, tsched_watermark;
117 unsigned nfragments;
118 pa_memchunk memchunk;
119
120 char *device_name;
121
122 pa_bool_t use_mmap, use_tsched;
123
124 pa_bool_t first;
125
126 pa_rtpoll_item *alsa_rtpoll_item;
127
128 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
129
130 pa_smoother *smoother;
131 int64_t frame_index;
132
133 snd_pcm_sframes_t hwbuf_unused_frames;
134 snd_pcm_sframes_t avail_min_frames;
135 };
136
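/* Cap the wakeup watermark at half of the part of the hardware buffer we
 * actually use, so that the doubling done on underruns in mmap_write()
 * cannot grow it without bounds. */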
137 static void fix_tsched_watermark(struct userdata *u) {
138 size_t max_use;
139 pa_assert(u);
140
141 max_use = u->hwbuf_size - u->hwbuf_unused_frames * u->frame_size;
142
143 if (u->tsched_watermark >= max_use/2)
144 u->tsched_watermark = max_use/2;
145 }
146
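/* mmap playback path: render directly into the memory-mapped hardware
 * buffer. We only fill it up to hwbuf_size minus hwbuf_unused_frames so the
 * effective latency matches what was requested, and on underrun we double
 * tsched_watermark (bounded by fix_tsched_watermark()) to wake up earlier
 * next time. */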
147 static int mmap_write(struct userdata *u) {
148 int work_done = 0;
149
150 pa_assert(u);
151 pa_sink_assert_ref(u->sink);
152
153 for (;;) {
154 pa_memchunk chunk;
155 void *p;
156 snd_pcm_sframes_t n;
157 int err;
158 const snd_pcm_channel_area_t *areas;
159 snd_pcm_uframes_t offset, frames;
160 size_t left_to_play;
161
162 snd_pcm_hwsync(u->pcm_handle);
163
164 /* First we determine how many samples are missing to fill the
165 * buffer up to 100% */
166
167 if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {
168
169 pa_log_debug("snd_pcm_avail_update: %s", snd_strerror(n));
170
171 if (n == -EAGAIN) {
172 pa_log_debug("EAGAIN");
173 return work_done;
174 }
175
176 if (n == -EPIPE)
177 pa_log_debug("snd_pcm_avail_update: Buffer underrun!");
178
179 if ((err = snd_pcm_recover(u->pcm_handle, n, 1)) == 0) {
180 u->first = TRUE;
181 continue;
182 }
183
184 pa_log("snd_pcm_recover: %s", snd_strerror(err));
185 return -1;
186 }
187
188 /* We only use part of the buffer that matches our
189 * dynamically requested latency */
190
191 if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
192 return work_done;
193
194 if (n*u->frame_size < u->hwbuf_size)
195 left_to_play = u->hwbuf_size - (n*u->frame_size);
196 else
197 left_to_play = 0;
198
199 pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
200
201 if (left_to_play <= 0 && !u->first) {
202 u->tsched_watermark *= 2;
203 fix_tsched_watermark(u);
204 pa_log_notice("Underrun! Increasing wakeup watermark to %0.2f ms",
205 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
206 }
207
208 frames = n = n - u->hwbuf_unused_frames;
209
210 pa_log_debug("%llu frames to write", (unsigned long long) frames);
211
212 if (PA_UNLIKELY((err = snd_pcm_mmap_begin(u->pcm_handle, &areas, &offset, &frames)) < 0)) {
213
214 pa_log_debug("snd_pcm_mmap_begin: %s", snd_strerror(err));
215
216 if (err == -EAGAIN) {
217 pa_log_debug("EAGAIN");
218 return work_done;
219 }
220
221 if (err == -EPIPE)
222 pa_log_debug("snd_pcm_mmap_begin: Buffer underrun!");
223
224 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0) {
225 u->first = TRUE;
226 continue;
227 }
228
229 pa_log("Failed to write data to DSP: %s", snd_strerror(err));
230 return -1;
231 }
232
233 /* Check these are multiples of 8 bit */
234 pa_assert((areas[0].first & 7) == 0);
235 pa_assert((areas[0].step & 7) == 0);
236
237 /* We assume a single interleaved memory buffer */
238 pa_assert((areas[0].first >> 3) == 0);
239 pa_assert((areas[0].step >> 3) == u->frame_size);
240
241 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
242
243 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, 1);
244 chunk.length = pa_memblock_get_length(chunk.memblock);
245 chunk.index = 0;
246
247 pa_sink_render_into_full(u->sink, &chunk);
248
249 /* FIXME: Maybe we can do something to keep this memory block
250 * a little bit longer around? */
251 pa_memblock_unref_fixed(chunk.memblock);
252
253 if (PA_UNLIKELY((err = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
254
255 pa_log_debug("snd_pcm_mmap_commit: %s", snd_strerror(err));
256
257 if (err == -EAGAIN) {
258 pa_log_debug("EAGAIN");
259 return work_done;
260 }
261
262 if (err == -EPIPE)
263 pa_log_debug("snd_pcm_mmap_commit: Buffer underrun!");
264
265 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0) {
266 u->first = TRUE;
267 continue;
268 }
269
270 pa_log("Failed to write data to DSP: %s", snd_strerror(err));
271 return -1;
272 }
273
274 work_done = 1;
275
276 u->frame_index += frames;
277
278 pa_log_debug("wrote %llu frames", (unsigned long long) frames);
279
280 if (PA_LIKELY(frames >= (snd_pcm_uframes_t) n))
281 return work_done;
282 }
283 }
284
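/* Non-mmap playback path: render into an intermediate memchunk and hand it
 * to the device with snd_pcm_writei(). Used when mmap access is disabled or
 * unsupported. */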
285 static int unix_write(struct userdata *u) {
286 snd_pcm_status_t *status;
287 int work_done = 0;
288
289 snd_pcm_status_alloca(&status);
290
291 pa_assert(u);
292 pa_sink_assert_ref(u->sink);
293
294 for (;;) {
295 void *p;
296 snd_pcm_sframes_t n, frames;
297 int err;
298
299 snd_pcm_hwsync(u->pcm_handle);
300 snd_pcm_avail_update(u->pcm_handle);
301
302 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) {
303 pa_log("Failed to query DSP status data: %s", snd_strerror(err));
304 return -1;
305 }
306
307 if (PA_UNLIKELY(snd_pcm_status_get_avail_max(status)*u->frame_size >= u->hwbuf_size))
308 pa_log_debug("Buffer underrun!");
309
310 n = snd_pcm_status_get_avail(status);
311
312 /* We only use part of the buffer that matches our
313 * dynamically requested latency */
314
315 if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
316 return work_done;
317
318 n -= u->hwbuf_unused_frames;
319
320 if (u->memchunk.length <= 0)
321 pa_sink_render(u->sink, n * u->frame_size, &u->memchunk);
322
323 pa_assert(u->memchunk.length > 0);
324
325 frames = u->memchunk.length / u->frame_size;
326
327 if (frames > n)
328 frames = n;
329
330 p = pa_memblock_acquire(u->memchunk.memblock);
331 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, frames);
332 pa_memblock_release(u->memchunk.memblock);
333
334 pa_assert(frames != 0);
335
336 if (PA_UNLIKELY(frames < 0)) {
337
338 if (frames == -EAGAIN) {
339 pa_log_debug("EAGAIN");
340 return work_done;
341 }
342
343 if (frames == -EPIPE)
344 pa_log_debug("snd_pcm_avail_update: Buffer underrun!");
345
346 if ((frames = snd_pcm_recover(u->pcm_handle, frames, 1)) == 0) {
347 u->first = TRUE;
348 continue;
349 }
350
351 pa_log("Failed to write data to DSP: %s", snd_strerror(frames));
352 return -1;
353 }
354
355 u->memchunk.index += frames * u->frame_size;
356 u->memchunk.length -= frames * u->frame_size;
357
358 if (u->memchunk.length <= 0) {
359 pa_memblock_unref(u->memchunk.memblock);
360 pa_memchunk_reset(&u->memchunk);
361 }
362
363 work_done = 1;
364
365 u->frame_index += frames;
366
367 if (PA_LIKELY(frames >= n))
368 return work_done;
369 }
370 }
371
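/* Feed the time smoother with pairs of (system time, playback time), where
 * the playback time is derived from the frames we wrote minus what the
 * hardware still has queued. sink_get_latency() interpolates between these
 * samples to answer latency queries cheaply. */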
372 static void update_smoother(struct userdata *u) {
373 snd_pcm_sframes_t delay = 0;
374 int64_t frames;
375 int err;
376 pa_usec_t now1, now2;
377 /* struct timeval timestamp; */
378 snd_pcm_status_t *status;
379
380 snd_pcm_status_alloca(&status);
381
382 pa_assert(u);
383 pa_assert(u->pcm_handle);
384
385 /* Let's update the time smoother */
386
387 snd_pcm_hwsync(u->pcm_handle);
388 snd_pcm_avail_update(u->pcm_handle);
389
390 /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
391 /* pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
392 /* return; */
393 /* } */
394
395 /* delay = snd_pcm_status_get_delay(status); */
396
397 if (PA_UNLIKELY((err = snd_pcm_delay(u->pcm_handle, &delay)) < 0)) {
398 pa_log("Failed to query DSP status data: %s", snd_strerror(err));
399 return;
400 }
401
402
403 frames = u->frame_index - delay;
404 /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */
405
406 /* snd_pcm_status_get_tstamp(status, &timestamp); */
407 /* pa_rtclock_from_wallclock(&timestamp); */
408 /* now1 = pa_timeval_load(&timestamp); */
409
410 now1 = pa_rtclock_usec();
411 now2 = pa_bytes_to_usec(frames * u->frame_size, &u->sink->sample_spec);
412 pa_smoother_put(u->smoother, now1, now2);
413 }
414
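/* Estimate the current latency: bytes handed to the hardware minus the
 * smoothed playback position, plus whatever is still sitting in our own
 * memchunk. */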
415 static pa_usec_t sink_get_latency(struct userdata *u) {
416 pa_usec_t r = 0;
417 int64_t delay;
418 pa_usec_t now1, now2;
419
420 pa_assert(u);
421
422 now1 = pa_rtclock_usec();
423 now2 = pa_smoother_get(u->smoother, now1);
424
425 delay = (int64_t) pa_bytes_to_usec(u->frame_index * u->frame_size, &u->sink->sample_spec) - now2;
426
427 if (delay > 0)
428 r = (pa_usec_t) delay;
429
430 if (u->memchunk.memblock)
431 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
432
433 return r;
434 }
435
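/* (Re)build the rtpoll item from the poll descriptors ALSA asks us to watch
 * for this PCM handle. */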
436 static int build_pollfd(struct userdata *u) {
437 int err;
438 struct pollfd *pollfd;
439 int n;
440
441 pa_assert(u);
442 pa_assert(u->pcm_handle);
443
444 if ((n = snd_pcm_poll_descriptors_count(u->pcm_handle)) < 0) {
445 pa_log("snd_pcm_poll_descriptors_count() failed: %s", snd_strerror(n));
446 return -1;
447 }
448
449 if (u->alsa_rtpoll_item)
450 pa_rtpoll_item_free(u->alsa_rtpoll_item);
451
452 u->alsa_rtpoll_item = pa_rtpoll_item_new(u->rtpoll, PA_RTPOLL_NEVER, n);
453 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, NULL);
454
455 if ((err = snd_pcm_poll_descriptors(u->pcm_handle, pollfd, n)) < 0) {
456 pa_log("snd_pcm_poll_descriptors() failed: %s", snd_strerror(err));
457 return -1;
458 }
459
460 return 0;
461 }
462
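/* Suspend: drain and close the PCM device, free its poll item and pause the
 * time smoother until we are resumed. */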
463 static int suspend(struct userdata *u) {
464 pa_assert(u);
465 pa_assert(u->pcm_handle);
466
467 pa_smoother_pause(u->smoother, pa_rtclock_usec());
468
469 /* Let's suspend */
470 snd_pcm_drain(u->pcm_handle);
471 snd_pcm_close(u->pcm_handle);
472 u->pcm_handle = NULL;
473
474 if (u->alsa_rtpoll_item) {
475 pa_rtpoll_item_free(u->alsa_rtpoll_item);
476 u->alsa_rtpoll_item = NULL;
477 }
478
479 pa_log_info("Device suspended...");
480
481 return 0;
482 }
483
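/* How long we may sleep before refilling: the requested latency (or the
 * full buffer time if nothing specific was requested) minus the wakeup
 * watermark, halved on the very first iteration so playback starts
 * promptly. */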
484 static pa_usec_t hw_sleep_time(struct userdata *u) {
485 pa_usec_t usec, wm;
486
487 pa_assert(u);
488
489 usec = pa_sink_get_requested_latency_within_thread(u->sink);
490
491 if (usec == (pa_usec_t) -1)
492 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
493
494 /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */
495
496 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
497
498 if (usec >= wm)
499 usec -= wm;
500 else
501 usec /= 2;
502
503 if (u->first) {
504 pa_log_debug("Decreasing wakeup time for the first iteration by half.");
505 usec /= 2;
506 }
507
508 /* pa_log_debug("after watermark: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */
509
510 return usec;
511 }
512
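/* Recompute hwbuf_unused_frames and avail_min from the currently requested
 * latency and push the result to ALSA as software parameters. Called on
 * startup, on resume and whenever the requested latency changes. */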
513 static int update_sw_params(struct userdata *u) {
514 int err;
515 pa_usec_t latency;
516
517 pa_assert(u);
518
519 /* Use the full buffer if no one asked us for anything specific */
520 u->hwbuf_unused_frames = 0;
521
522 if (u->use_tsched)
523 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
524 size_t b;
525
526 pa_log_debug("latency set to %llu", (unsigned long long) latency);
527
528 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
529
530 /* We need at least one sample in our buffer */
531
532 if (PA_UNLIKELY(b < u->frame_size))
533 b = u->frame_size;
534
535 u->hwbuf_unused_frames =
536 PA_LIKELY(b < u->hwbuf_size) ?
537 ((u->hwbuf_size - b) / u->frame_size) : 0;
538
539 fix_tsched_watermark(u);
540 }
541
542 pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u->hwbuf_unused_frames);
543
544 /* We need at least one frame in the used part of the buffer */
545 u->avail_min_frames = u->hwbuf_unused_frames + 1;
546
547 if (u->use_tsched) {
548 pa_usec_t usec;
549
550 usec = hw_sleep_time(u);
551
552 u->avail_min_frames += (pa_usec_to_bytes(usec, &u->sink->sample_spec) / u->frame_size);
553 }
554
555 pa_log_debug("setting avail_min=%lu", (unsigned long) u->avail_min_frames);
556
557 if ((err = pa_alsa_set_sw_params(u->pcm_handle, u->avail_min_frames)) < 0) {
558 pa_log("Failed to set software parameters: %s", snd_strerror(err));
559 return err;
560 }
561
562 return 0;
563 }
564
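/* Resume from suspend: reopen the device and require the exact same sample
 * spec, fragment setup and access mode we had before; anything else is
 * treated as a failure. */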
565 static int unsuspend(struct userdata *u) {
566 pa_sample_spec ss;
567 int err;
568 pa_bool_t b, d;
569 unsigned nfrags;
570 snd_pcm_uframes_t period_size;
571
572 pa_assert(u);
573 pa_assert(!u->pcm_handle);
574
575 pa_log_info("Trying resume...");
576
577 snd_config_update_free_global();
578 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK)) < 0) {
579 pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
580 goto fail;
581 }
582
583 ss = u->sink->sample_spec;
584 nfrags = u->nfragments;
585 period_size = u->fragment_size / u->frame_size;
586 b = u->use_mmap;
587 d = u->use_tsched;
588
589 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
590 pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
591 goto fail;
592 }
593
594 if (b != u->use_mmap || d != u->use_tsched) {
595 pa_log_warn("Resume failed, couldn't get original access mode.");
596 goto fail;
597 }
598
599 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
600 pa_log_warn("Resume failed, couldn't restore original sample settings.");
601 goto fail;
602 }
603
604 if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
605 pa_log_warn("Resume failed, couldn't restore original fragment settings.");
606 goto fail;
607 }
608
609 if (update_sw_params(u) < 0)
610 goto fail;
611
612 if (build_pollfd(u) < 0)
613 goto fail;
614
615 /* FIXME: We need to reload the volume somehow */
616
617 u->first = TRUE;
618
619 pa_log_info("Resumed successfully...");
620
621 return 0;
622
623 fail:
624 if (u->pcm_handle) {
625 snd_pcm_close(u->pcm_handle);
626 u->pcm_handle = NULL;
627 }
628
629 return -1;
630 }
631
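/* Message handler running in the I/O thread: answers latency queries and
 * handles state changes (building the poll item on startup, suspending and
 * resuming the device). */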
632 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
633 struct userdata *u = PA_SINK(o)->userdata;
634
635 switch (code) {
636
637 case PA_SINK_MESSAGE_GET_LATENCY: {
638 pa_usec_t r = 0;
639
640 if (u->pcm_handle)
641 r = sink_get_latency(u);
642
643 *((pa_usec_t*) data) = r;
644
645 return 0;
646 }
647
648 case PA_SINK_MESSAGE_SET_STATE:
649
650 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
651
652 case PA_SINK_SUSPENDED:
653 pa_assert(PA_SINK_OPENED(u->sink->thread_info.state));
654
655 if (suspend(u) < 0)
656 return -1;
657
658 break;
659
660 case PA_SINK_IDLE:
661 case PA_SINK_RUNNING:
662
663 if (u->sink->thread_info.state == PA_SINK_INIT) {
664 if (build_pollfd(u) < 0)
665 return -1;
666 }
667
668 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
669 if (unsuspend(u) < 0)
670 return -1;
671 }
672
673 break;
674
675 case PA_SINK_UNLINKED:
676 case PA_SINK_INIT:
677 ;
678 }
679
680 break;
681
682 /* case PA_SINK_MESSAGE_ADD_INPUT: */
683 /* case PA_SINK_MESSAGE_REMOVE_INPUT: */
684 /* case PA_SINK_MESSAGE_REMOVE_INPUT_AND_BUFFER: { */
685 /* int r = pa_sink_process_msg(o, code, data, offset, chunk); */
686 /* update_hwbuf_unused_frames(u); */
687 /* return r; */
688 /* } */
689 }
690
691 return pa_sink_process_msg(o, code, data, offset, chunk);
692 }
693
694 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
695 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
696
697 pa_assert(u);
698 pa_assert(u->mixer_handle);
699
700 if (mask == SND_CTL_EVENT_MASK_REMOVE)
701 return 0;
702
703 if (mask & SND_CTL_EVENT_MASK_VALUE) {
704 pa_sink_get_volume(u->sink);
705 pa_sink_get_mute(u->sink);
706 }
707
708 return 0;
709 }
710
711 static int sink_get_volume_cb(pa_sink *s) {
712 struct userdata *u = s->userdata;
713 int err;
714 int i;
715
716 pa_assert(u);
717 pa_assert(u->mixer_elem);
718
719 for (i = 0; i < s->sample_spec.channels; i++) {
720 long alsa_vol;
721
722 pa_assert(snd_mixer_selem_has_playback_channel(u->mixer_elem, u->mixer_map[i]));
723
724 if (u->hw_dB_supported) {
725
726 if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) >= 0) {
727 s->volume.values[i] = pa_sw_volume_from_dB(alsa_vol / 100.0);
728 continue;
729 }
730
731 u->hw_dB_supported = FALSE;
732 }
733
734 if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
735 goto fail;
736
737 s->volume.values[i] = (pa_volume_t) roundf(((float) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));
738 }
739
740 return 0;
741
742 fail:
743 pa_log_error("Unable to read volume: %s", snd_strerror(err));
744
745 return -1;
746 }
747
748 static int sink_set_volume_cb(pa_sink *s) {
749 struct userdata *u = s->userdata;
750 int err;
751 int i;
752
753 pa_assert(u);
754 pa_assert(u->mixer_elem);
755
756 for (i = 0; i < s->sample_spec.channels; i++) {
757 long alsa_vol;
758 pa_volume_t vol;
759
760 pa_assert(snd_mixer_selem_has_playback_channel(u->mixer_elem, u->mixer_map[i]));
761
762 vol = PA_MIN(s->volume.values[i], PA_VOLUME_NORM);
763
764 if (u->hw_dB_supported) {
765 alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
766 alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);
767
768 if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, -1)) >= 0) {
769
770 if (snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol) >= 0)
771 s->volume.values[i] = pa_sw_volume_from_dB(alsa_vol / 100.0);
772
773 continue;
774 }
775
776 u->hw_dB_supported = FALSE;
777
778 }
779
780 alsa_vol = (long) roundf(((float) vol * (u->hw_volume_max - u->hw_volume_min)) / PA_VOLUME_NORM) + u->hw_volume_min;
781 alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);
782
783 if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
784 goto fail;
785
786 if (snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol) >= 0)
787 s->volume.values[i] = (pa_volume_t) roundf(((float) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));
788 }
789
790 return 0;
791
792 fail:
793 pa_log_error("Unable to set volume: %s", snd_strerror(err));
794
795 return -1;
796 }
797
798 static int sink_get_mute_cb(pa_sink *s) {
799 struct userdata *u = s->userdata;
800 int err, sw;
801
802 pa_assert(u);
803 pa_assert(u->mixer_elem);
804
805 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
806 pa_log_error("Unable to get switch: %s", snd_strerror(err));
807 return -1;
808 }
809
810 s->muted = !sw;
811
812 return 0;
813 }
814
815 static int sink_set_mute_cb(pa_sink *s) {
816 struct userdata *u = s->userdata;
817 int err;
818
819 pa_assert(u);
820 pa_assert(u->mixer_elem);
821
822 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
823 pa_log_error("Unable to set switch: %s", snd_strerror(err));
824 return -1;
825 }
826
827 return 0;
828 }
829
830 static void sink_update_requested_latency_cb(pa_sink *s) {
831 struct userdata *u = s->userdata;
832 pa_assert(u);
833
834 update_sw_params(u);
835 }
836
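/* Rewind support: take back data we already wrote to the hardware buffer
 * via snd_pcm_rewind(), e.g. so that a newly added stream becomes audible
 * without waiting for the buffer to drain. We never rewind into the
 * watermark's worth of data closest to the playback pointer. */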
837 static int process_rewind(struct userdata *u) {
838 snd_pcm_sframes_t unused;
839 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
840 pa_assert(u);
841
842 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
843 u->sink->thread_info.rewind_nbytes = 0;
844
845 pa_assert(rewind_nbytes > 0);
846 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
847
848 snd_pcm_hwsync(u->pcm_handle);
849 if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
850 pa_log("snd_pcm_avail_update() failed: %s", snd_strerror(unused));
851 return -1;
852 }
853
854 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
855
856 if (u->hwbuf_size > unused_nbytes)
857 limit_nbytes = u->hwbuf_size - unused_nbytes;
858 else
859 limit_nbytes = 0;
860
861 if (rewind_nbytes > limit_nbytes)
862 rewind_nbytes = limit_nbytes;
863
864 if (rewind_nbytes > 0) {
865 snd_pcm_sframes_t in_frames, out_frames;
866
867 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
868
869 in_frames = (snd_pcm_sframes_t) rewind_nbytes / u->frame_size;
870 pa_log_debug("before: %lu", (unsigned long) in_frames);
871 if ((out_frames = snd_pcm_rewind(u->pcm_handle, in_frames)) < 0) {
872 pa_log("snd_pcm_rewind() failed: %s", snd_strerror(out_frames));
873 return -1;
874 }
875 pa_log_debug("after: %lu", (unsigned long) out_frames);
876
877 rewind_nbytes = out_frames * u->frame_size;
878
879 if (rewind_nbytes <= 0)
880 pa_log_info("Tried rewind, but was apparently not possible.");
881 else {
882 u->frame_index -= out_frames;
883 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
884 pa_sink_process_rewind(u->sink, rewind_nbytes);
885 }
886 } else
887 pa_log_debug("Mhmm, actually there is nothing to rewind.");
888
889 return 0;
890 }
891
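/* The real-time I/O thread: render and write audio, keep the smoother up to
 * date, program the rtpoll timer for the next wakeup (in tsched mode) and
 * dispatch ALSA poll events. */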
892 static void thread_func(void *userdata) {
893 struct userdata *u = userdata;
894
895 pa_assert(u);
896
897 pa_log_debug("Thread starting up");
898
899 if (u->core->realtime_scheduling)
900 pa_make_realtime(u->core->realtime_priority);
901
902 pa_thread_mq_install(&u->thread_mq);
903 pa_rtpoll_install(u->rtpoll);
904
905 for (;;) {
906 int ret;
907
908 /* pa_log_debug("loop"); */
909
910 /* Render some data and write it to the dsp */
911 if (PA_SINK_OPENED(u->sink->thread_info.state)) {
912 int work_done = 0;
913
914 if (u->sink->thread_info.rewind_nbytes > 0)
915 if (process_rewind(u) < 0)
916 goto fail;
917
918 if (u->use_mmap)
919 work_done = mmap_write(u);
920 else
921 work_done = unix_write(u);
922
923 if (work_done < 0)
924 goto fail;
925
926 /* pa_log_debug("work_done = %i", work_done); */
927
928 if (work_done) {
929
930 if (u->first) {
931 pa_log_info("Starting playback.");
932 snd_pcm_start(u->pcm_handle);
933
934 pa_smoother_resume(u->smoother, pa_rtclock_usec());
935 }
936
937 update_smoother(u);
938 }
939
940 if (u->use_tsched) {
941 pa_usec_t usec, cusec;
942
943 /* OK, the playback buffer is now full, let's
944 * calculate when to wake up next */
945
946 usec = hw_sleep_time(u);
947
948 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) usec / PA_USEC_PER_MSEC); */
949
950 /* Convert from the sound card time domain to the
951 * system time domain */
952 cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), usec);
953
954 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
955
956 /* We don't trust the conversion, so we wake up whatever comes first */
957 pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(usec, cusec));
958 }
959
960 u->first = FALSE;
961
962 } else if (u->use_tsched)
963
964 /* OK, we're in an invalid state, let's disable our timers */
965 pa_rtpoll_set_timer_disabled(u->rtpoll);
966
967 /* Hmm, nothing to do. Let's sleep */
968 if ((ret = pa_rtpoll_run(u->rtpoll, 1)) < 0)
969 goto fail;
970
971 if (ret == 0)
972 goto finish;
973
974 /* Tell ALSA about this and process its response */
975 if (PA_SINK_OPENED(u->sink->thread_info.state)) {
976 struct pollfd *pollfd;
977 unsigned short revents = 0;
978 int err;
979 unsigned n;
980
981 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
982
983 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
984 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
985 goto fail;
986 }
987
988 if (revents & (POLLERR|POLLNVAL|POLLHUP)) {
989 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
990 goto fail;
991
992 u->first = TRUE;
993 }
994
995 if (revents)
996 pa_log_debug("Wakeup from ALSA! (%i)", revents);
997 }
998 }
999
1000 fail:
1001 /* If this was not a regular exit from the loop we have to continue
1002 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1003 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1004 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1005
1006 finish:
1007 pa_log_debug("Thread shutting down");
1008 }
1009
1010 int pa__init(pa_module*m) {
1011
1012 pa_modargs *ma = NULL;
1013 struct userdata *u = NULL;
1014 const char *dev_id;
1015 pa_sample_spec ss;
1016 pa_channel_map map;
1017 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1018 snd_pcm_uframes_t period_frames, tsched_frames;
1019 size_t frame_size;
1020 snd_pcm_info_t *pcm_info = NULL;
1021 int err;
1022 const char *name;
1023 char *name_buf = NULL;
1024 pa_bool_t namereg_fail;
1025 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, mixer_reset = TRUE;
1026 pa_usec_t usec;
1027 pa_sink_new_data data;
1028
1029 snd_pcm_info_alloca(&pcm_info);
1030
1031 pa_assert(m);
1032
1033 pa_alsa_redirect_errors_inc();
1034
1035 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1036 pa_log("Failed to parse module arguments");
1037 goto fail;
1038 }
1039
1040 ss = m->core->default_sample_spec;
1041 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1042 pa_log("Failed to parse sample specification and channel map");
1043 goto fail;
1044 }
1045
1046 frame_size = pa_frame_size(&ss);
1047
1048 nfrags = m->core->default_n_fragments;
1049 frag_size = pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1050 if (frag_size <= 0)
1051 frag_size = frame_size;
1052 tsched_size = pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1053 tsched_watermark = pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1054
1055 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1056 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1057 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1058 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1059 pa_log("Failed to parse buffer metrics");
1060 goto fail;
1061 }
1062
1063 hwbuf_size = frag_size * nfrags;
1064 period_frames = frag_size/frame_size;
1065 tsched_frames = tsched_size/frame_size;
1066
1067 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1068 pa_log("Failed to parse mmap argument.");
1069 goto fail;
1070 }
1071
1072 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1073 pa_log("Failed to parse timer_scheduling argument.");
1074 goto fail;
1075 }
1076
1077 if (use_tsched && !pa_rtclock_hrtimer()) {
1078 pa_log("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1079 use_tsched = FALSE;
1080 }
1081
1082 if (pa_modargs_get_value_boolean(ma, "mixer_reset", &mixer_reset) < 0) {
1083 pa_log("Failed to parse mixer_reset argument.");
1084 goto fail;
1085 }
1086
1087 u = pa_xnew0(struct userdata, 1);
1088 u->core = m->core;
1089 u->module = m;
1090 m->userdata = u;
1091 u->use_mmap = use_mmap;
1092 u->use_tsched = use_tsched;
1093 u->first = TRUE;
1094 pa_thread_mq_init(&u->thread_mq, m->core->mainloop);
1095 u->rtpoll = pa_rtpoll_new();
1096 u->alsa_rtpoll_item = NULL;
1097 pa_rtpoll_item_new_asyncmsgq(u->rtpoll, PA_RTPOLL_EARLY, u->thread_mq.inq);
1098
1099 u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE);
1100 usec = pa_rtclock_usec();
1101 pa_smoother_set_time_offset(u->smoother, usec);
1102 pa_smoother_pause(u->smoother, usec);
1103
1104 snd_config_update_free_global();
1105
1106 b = use_mmap;
1107 d = use_tsched;
1108
1109 if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1110
1111 if (!(u->pcm_handle = pa_alsa_open_by_device_id(
1112 dev_id,
1113 &u->device_name,
1114 &ss, &map,
1115 SND_PCM_STREAM_PLAYBACK,
1116 &nfrags, &period_frames, tsched_frames,
1117 &b, &d)))
1118
1119 goto fail;
1120
1121 } else {
1122
1123 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1124 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1125 &u->device_name,
1126 &ss, &map,
1127 SND_PCM_STREAM_PLAYBACK,
1128 &nfrags, &period_frames, tsched_frames,
1129 &b, &d)))
1130 goto fail;
1131
1132 }
1133
1134 pa_assert(u->device_name);
1135 pa_log_info("Successfully opened device %s.", u->device_name);
1136
1137 if (use_mmap && !b) {
1138 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1139 u->use_mmap = use_mmap = FALSE;
1140 }
1141
1142 if (use_tsched && (!b || !d)) {
1143 pa_log_info("Cannot enabled timer-based scheduling, falling back to sound IRQ scheduling.");
1144 u->use_tsched = use_tsched = FALSE;
1145 }
1146
1147 if (u->use_mmap)
1148 pa_log_info("Successfully enabled mmap() mode.");
1149
1150 if (u->use_tsched)
1151 pa_log_info("Successfully enabled timer-based scheduling mode.");
1152
1153 if ((err = snd_pcm_info(u->pcm_handle, pcm_info)) < 0) {
1154 pa_log("Error fetching PCM info: %s", snd_strerror(err));
1155 goto fail;
1156 }
1157
1158 /* ALSA might tweak the sample spec, so recalculate the frame size */
1159 frame_size = pa_frame_size(&ss);
1160
1161 if ((err = snd_mixer_open(&u->mixer_handle, 0)) < 0)
1162 pa_log_warn("Error opening mixer: %s", snd_strerror(err));
1163 else {
1164 pa_bool_t found = FALSE;
1165
1166 if (pa_alsa_prepare_mixer(u->mixer_handle, u->device_name) >= 0)
1167 found = TRUE;
1168 else {
1169 snd_pcm_info_t *info;
1170
1171 snd_pcm_info_alloca(&info);
1172
1173 if (snd_pcm_info(u->pcm_handle, info) >= 0) {
1174 char *md;
1175 int card;
1176
1177 if ((card = snd_pcm_info_get_card(info)) >= 0) {
1178
1179 md = pa_sprintf_malloc("hw:%i", card);
1180
1181 if (strcmp(u->device_name, md))
1182 if (pa_alsa_prepare_mixer(u->mixer_handle, md) >= 0)
1183 found = TRUE;
1184 pa_xfree(md);
1185 }
1186 }
1187 }
1188
1189 if (found)
1190 if (!(u->mixer_elem = pa_alsa_find_elem(u->mixer_handle, "Master", "PCM")))
1191 found = FALSE;
1192
1193 if (!found) {
1194 snd_mixer_close(u->mixer_handle);
1195 u->mixer_handle = NULL;
1196 }
1197 }
1198
1199 if ((name = pa_modargs_get_value(ma, "sink_name", NULL)))
1200 namereg_fail = TRUE;
1201 else {
1202 name = name_buf = pa_sprintf_malloc("alsa_output.%s", u->device_name);
1203 namereg_fail = FALSE;
1204 }
1205
1206 pa_sink_new_data_init(&data);
1207 data.driver = __FILE__;
1208 data.module = m;
1209 pa_sink_new_data_set_name(&data, name);
1210 data.namereg_fail = namereg_fail;
1211 pa_sink_new_data_set_sample_spec(&data, &ss);
1212 pa_sink_new_data_set_channel_map(&data, &map);
1213
1214 pa_alsa_init_proplist(data.proplist, pcm_info);
1215 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1216 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1217 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1218 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1219
1220 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
1221 pa_sink_new_data_done(&data);
1222 pa_xfree(name_buf);
1223
1224 if (!u->sink) {
1225 pa_log("Failed to create sink object");
1226 goto fail;
1227 }
1228
1229 u->sink->parent.process_msg = sink_process_msg;
1230 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1231 u->sink->userdata = u;
1232
1233 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1234 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1235
1236 u->frame_size = frame_size;
1237 u->fragment_size = frag_size = period_frames * frame_size;
1238 u->nfragments = nfrags;
1239 u->hwbuf_size = u->fragment_size * nfrags;
1240 u->hwbuf_unused_frames = 0;
1241 u->avail_min_frames = 0;
1242 u->tsched_watermark = tsched_watermark;
1243 u->frame_index = 0;
1244 u->hw_dB_supported = FALSE;
1245 u->hw_dB_min = u->hw_dB_max = 0;
1246 u->hw_volume_min = u->hw_volume_max = 0;
1247
1248 if (use_tsched)
1249 if (u->tsched_watermark >= u->hwbuf_size/2)
1250 u->tsched_watermark = pa_frame_align(u->hwbuf_size/2, &ss);
1251
1252 u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
1253 u->sink->max_latency = pa_bytes_to_usec(u->hwbuf_size, &ss);
1254
1255 if (!use_tsched)
1256 u->sink->min_latency = u->sink->max_latency;
1257
1258 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1259 nfrags, (long unsigned) u->fragment_size,
1260 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1261
1262 if (use_tsched)
1263 pa_log_info("Time scheduling watermark is %0.2fms",
1264 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1265
1266 if (update_sw_params(u) < 0)
1267 goto fail;
1268
1269 pa_memchunk_reset(&u->memchunk);
1270
1271 if (u->mixer_handle) {
1272 pa_assert(u->mixer_elem);
1273
1274 if (snd_mixer_selem_has_playback_volume(u->mixer_elem))
1275
1276 if (pa_alsa_calc_mixer_map(u->mixer_elem, &map, u->mixer_map, TRUE) >= 0 &&
1277 snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) >= 0) {
1278
1279 pa_bool_t suitable = TRUE;
1280
1281 pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
1282
1283 if (u->hw_volume_min > u->hw_volume_max) {
1284
1285 pa_log_info("Minimal volume %li larger than maximum volume %li. Strange stuff Falling back to software volume control.", u->hw_volume_min, u->hw_volume_max);
1286 suitable = FALSE;
1287
1288 } else if (u->hw_volume_max - u->hw_volume_min < 3) {
1289
1290 pa_log_info("Device has less than 4 volume levels. Falling back to software volume control.");
1291 suitable = FALSE;
1292
1293 } else if (snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) >= 0) {
1294
1295 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", u->hw_dB_min/100.0, u->hw_dB_max/100.0);
1296
1297 /* Let's see if this thing actually is useful for muting */
1298 if (u->hw_dB_min > -6000) {
1299 pa_log_info("Device cannot attenuate for more than -60 dB (only %0.2f dB supported), falling back to software volume control.", ((double) u->hw_dB_min) / 100);
1300
1301 suitable = FALSE;
1302 } else if (u->hw_dB_max < 0) {
1303
1304 pa_log_info("Device is still attenuated at maximum volume setting (%0.2f dB is maximum). Strange stuff. Falling back to software volume control.", ((double) u->hw_dB_max) / 100);
1305 suitable = FALSE;
1306
1307 } else if (u->hw_dB_min >= u->hw_dB_max) {
1308
1309 pa_log_info("Minimal dB (%0.2f) larger or equal to maximum dB (%0.2f). Strange stuff. Falling back to software volume control.", ((double) u->hw_dB_min) / 100, ((double) u->hw_dB_max) / 100);
1310 suitable = FALSE;
1311
1312 } else {
1313
1314 if (u->hw_dB_max > 0) {
1315 /* dB > 0 means overamplification, and clipping, we don't want that here */
1316 pa_log_info("Device can do overamplification for %0.2f dB. Limiting to 0 db", ((double) u->hw_dB_max) / 100);
1317 u->hw_dB_max = 0;
1318 }
1319
1320 u->hw_dB_supported = TRUE;
1321 }
1322 }
1323
1324 if (suitable) {
1325 u->sink->get_volume = sink_get_volume_cb;
1326 u->sink->set_volume = sink_set_volume_cb;
1327 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
1328 pa_log_info("Using hardware volume control. %s dB scale.", u->hw_dB_supported ? "Using" : "Not using");
1329
1330 } else if (mixer_reset) {
1331 pa_log_info("Using software volume control. Trying to reset sound card to 0 dB.");
1332 pa_alsa_0dB_playback(u->mixer_elem);
1333 } else
1334 pa_log_info("Using software volume control. Leaving hw mixer controls untouched.");
1335 }
1336
1337 if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
1338 u->sink->get_mute = sink_get_mute_cb;
1339 u->sink->set_mute = sink_set_mute_cb;
1340 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1341 }
1342
1343 u->mixer_fdl = pa_alsa_fdlist_new();
1344
1345 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, m->core->mainloop) < 0) {
1346 pa_log("Failed to initialize file descriptor monitoring");
1347 goto fail;
1348 }
1349
1350 snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
1351 snd_mixer_elem_set_callback_private(u->mixer_elem, u);
1352 } else
1353 u->mixer_fdl = NULL;
1354
1355 pa_alsa_dump(u->pcm_handle);
1356
1357 if (!(u->thread = pa_thread_new(thread_func, u))) {
1358 pa_log("Failed to create thread.");
1359 goto fail;
1360 }
1361
1362 /* Get initial mixer settings */
1363 if (data.volume_is_set) {
1364 if (u->sink->set_volume)
1365 u->sink->set_volume(u->sink);
1366 } else {
1367 if (u->sink->get_volume)
1368 u->sink->get_volume(u->sink);
1369 }
1370
1371 if (data.muted_is_set) {
1372 if (u->sink->set_mute)
1373 u->sink->set_mute(u->sink);
1374 } else {
1375 if (u->sink->get_mute)
1376 u->sink->get_mute(u->sink);
1377 }
1378
1379 pa_sink_put(u->sink);
1380
1381 pa_modargs_free(ma);
1382
1383 return 0;
1384
1385 fail:
1386
1387 if (ma)
1388 pa_modargs_free(ma);
1389
1390 pa__done(m);
1391
1392 return -1;
1393 }
1394
1395 void pa__done(pa_module*m) {
1396 struct userdata *u;
1397
1398 pa_assert(m);
1399
1400 if (!(u = m->userdata)) {
1401 pa_alsa_redirect_errors_dec();
1402 return;
1403 }
1404
1405 if (u->sink)
1406 pa_sink_unlink(u->sink);
1407
1408 if (u->thread) {
1409 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1410 pa_thread_free(u->thread);
1411 }
1412
1413 pa_thread_mq_done(&u->thread_mq);
1414
1415 if (u->sink)
1416 pa_sink_unref(u->sink);
1417
1418 if (u->memchunk.memblock)
1419 pa_memblock_unref(u->memchunk.memblock);
1420
1421 if (u->alsa_rtpoll_item)
1422 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1423
1424 if (u->rtpoll)
1425 pa_rtpoll_free(u->rtpoll);
1426
1427 if (u->mixer_fdl)
1428 pa_alsa_fdlist_free(u->mixer_fdl);
1429
1430 if (u->mixer_handle)
1431 snd_mixer_close(u->mixer_handle);
1432
1433 if (u->pcm_handle) {
1434 snd_pcm_drop(u->pcm_handle);
1435 snd_pcm_close(u->pcm_handle);
1436 }
1437
1438 if (u->smoother)
1439 pa_smoother_free(u->smoother);
1440
1441 pa_xfree(u->device_name);
1442 pa_xfree(u);
1443
1444 snd_config_update_free_global();
1445
1446 pa_alsa_redirect_errors_dec();
1447 }