pulseaudio: src/modules/module-alsa-source.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
38
39 #include <pulsecore/core-error.h>
40 #include <pulsecore/core.h>
41 #include <pulsecore/module.h>
42 #include <pulsecore/memchunk.h>
43 #include <pulsecore/sink.h>
44 #include <pulsecore/modargs.h>
45 #include <pulsecore/core-util.h>
46 #include <pulsecore/sample-util.h>
47 #include <pulsecore/log.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/thread.h>
50 #include <pulsecore/core-error.h>
51 #include <pulsecore/thread-mq.h>
52 #include <pulsecore/rtpoll.h>
53 #include <pulsecore/time-smoother.h>
54 #include <pulsecore/rtclock.h>
55
56 #include "alsa-util.h"
57 #include "module-alsa-source-symdef.h"
58
59 PA_MODULE_AUTHOR("Lennart Poettering");
60 PA_MODULE_DESCRIPTION("ALSA Source");
61 PA_MODULE_VERSION(PACKAGE_VERSION);
62 PA_MODULE_LOAD_ONCE(FALSE);
63 PA_MODULE_USAGE(
64 "source_name=<name for the source> "
65 "device=<ALSA device> "
66 "device_id=<ALSA card index> "
67 "format=<sample format> "
68 "rate=<sample rate> "
69 "channels=<number of channels> "
70 "channel_map=<channel map> "
71 "fragments=<number of fragments> "
72 "fragment_size=<fragment size> "
73 "mmap=<enable memory mapping?> "
74 "tsched=<enable system timer based scheduling mode?> "
75 "tsched_buffer_size=<buffer size when using timer based scheduling> "
76 "tsched_buffer_watermark=<upper fill watermark>");
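/* Illustrative invocation (the device name is an assumption; adjust it and the
 * other arguments for the local setup):
 *   load-module module-alsa-source device=hw:0 tsched=1 */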
77
78 static const char* const valid_modargs[] = {
79 "source_name",
80 "device",
81 "device_id",
82 "format",
83 "rate",
84 "channels",
85 "channel_map",
86 "fragments",
87 "fragment_size",
88 "mmap",
89 "tsched",
90 "tsched_buffer_size",
91 "tsched_buffer_watermark",
92 NULL
93 };
94
95 #define DEFAULT_DEVICE "default"
96 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
97 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
98 #define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
99 #define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
100
101 struct userdata {
102 pa_core *core;
103 pa_module *module;
104 pa_source *source;
105
106 pa_thread *thread;
107 pa_thread_mq thread_mq;
108 pa_rtpoll *rtpoll;
109
110 snd_pcm_t *pcm_handle;
111
112 pa_alsa_fdlist *mixer_fdl;
113 snd_mixer_t *mixer_handle;
114 snd_mixer_elem_t *mixer_elem;
115 long hw_volume_max, hw_volume_min;
116 long hw_dB_max, hw_dB_min;
117 pa_bool_t hw_dB_supported;
118 pa_bool_t mixer_seperate_channels;
119
120 pa_cvolume hardware_volume;
121
122 size_t frame_size, fragment_size, hwbuf_size, tsched_watermark;
123 unsigned nfragments;
124
125 char *device_name;
126
127 pa_bool_t use_mmap, use_tsched;
128
129 pa_rtpoll_item *alsa_rtpoll_item;
130
131 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
132
133 pa_smoother *smoother;
134 int64_t frame_index;
135
136 snd_pcm_sframes_t hwbuf_unused_frames;
137 };
138
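/* Clamp the timer-scheduling watermark so that it never exceeds the usable
 * part of the hardware buffer (minus a minimal sleep margin) and never drops
 * below the minimal wakeup margin. */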
139 static void fix_tsched_watermark(struct userdata *u) {
140 size_t max_use;
141 size_t min_sleep, min_wakeup;
142 pa_assert(u);
143
144 max_use = u->hwbuf_size - (size_t) u->hwbuf_unused_frames * u->frame_size;
145
146 min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
147 min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
148
149 if (min_sleep > max_use/2)
150 min_sleep = pa_frame_align(max_use/2, &u->source->sample_spec);
151 if (min_sleep < u->frame_size)
152 min_sleep = u->frame_size;
153
154 if (min_wakeup > max_use/2)
155 min_wakeup = pa_frame_align(max_use/2, &u->source->sample_spec);
156 if (min_wakeup < u->frame_size)
157 min_wakeup = u->frame_size;
158
159 if (u->tsched_watermark > max_use-min_sleep)
160 u->tsched_watermark = max_use-min_sleep;
161
162 if (u->tsched_watermark < min_wakeup)
163 u->tsched_watermark = min_wakeup;
164 }
165
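/* Derive how long the IO thread may sleep and how much time to reserve for
 * processing, based on the requested latency (or the full buffer) and the
 * current wakeup watermark. */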
166 static pa_usec_t hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
167 pa_usec_t wm, usec;
168
169 pa_assert(u);
170
171 usec = pa_source_get_requested_latency_within_thread(u->source);
172
173 if (usec == (pa_usec_t) -1)
174 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
175
176 /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */
177
178 wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
179
180 if (usec >= wm) {
181 *sleep_usec = usec - wm;
182 *process_usec = wm;
183 } else
184 *process_usec = *sleep_usec = usec /= 2;
185
186 /* pa_log_debug("after watermark: %u ms", (unsigned) (*sleep_usec / PA_USEC_PER_MSEC)); */
187
188 return usec;
189 }
190
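/* Log an ALSA error and try snd_pcm_recover(); on success the stream is
 * restarted, otherwise -1 is returned. */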
191 static int try_recover(struct userdata *u, const char *call, int err) {
192 pa_assert(u);
193 pa_assert(call);
194 pa_assert(err < 0);
195
196 pa_log_debug("%s: %s", call, snd_strerror(err));
197
198 pa_assert(err != -EAGAIN);
199
200 if (err == -EPIPE)
201 pa_log_debug("%s: Buffer overrun!", call);
202
203 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0) {
204 snd_pcm_start(u->pcm_handle);
205 return 0;
206 }
207
208 pa_log("%s: %s", call, snd_strerror(err));
209 return -1;
210 }
211
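/* Return how many bytes are still free in the hardware buffer. If it already
 * overran, double the wakeup watermark when timer-based scheduling is used. */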
212 static size_t check_left_to_record(struct userdata *u, snd_pcm_sframes_t n) {
213 size_t left_to_record;
214
215 if ((size_t) n*u->frame_size < u->hwbuf_size)
216 left_to_record = u->hwbuf_size - ((size_t) n*u->frame_size);
217 else
218 left_to_record = 0;
219
220 if (left_to_record > 0) {
221 /* pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC); */
222 } else {
223 pa_log_info("Overrun!");
224
225 if (u->use_tsched) {
226 size_t old_watermark = u->tsched_watermark;
227
228 u->tsched_watermark *= 2;
229 fix_tsched_watermark(u);
230
231 if (old_watermark != u->tsched_watermark)
232 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
233 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
234 }
235 }
236
237 return left_to_record;
238 }
239
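/* Read captured audio through the mmap interface: wrap the mapped DMA area in
 * fixed memblocks, post them to the source and commit the frames back to
 * ALSA. In timer-based mode, stop once enough headroom is left. */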
240 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec) {
241 int work_done = 0;
242 pa_usec_t max_sleep_usec = 0, process_usec = 0;
243 size_t left_to_record;
244
245 pa_assert(u);
246 pa_source_assert_ref(u->source);
247
248 if (u->use_tsched)
249 hw_sleep_time(u, &max_sleep_usec, &process_usec);
250
251 for (;;) {
252 snd_pcm_sframes_t n;
253 int r;
254
255 snd_pcm_hwsync(u->pcm_handle);
256
257 if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {
258
259 if ((r = try_recover(u, "snd_pcm_avail_update", (int) n)) == 0)
260 continue;
261
262 return r;
263 }
264
265 left_to_record = check_left_to_record(u, n);
266
267 if (u->use_tsched)
268 if (pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
269 break;
270
271 if (PA_UNLIKELY(n <= 0))
272 break;
273
274 for (;;) {
275 int err;
276 const snd_pcm_channel_area_t *areas;
277 snd_pcm_uframes_t offset, frames = (snd_pcm_uframes_t) n;
278 pa_memchunk chunk;
279 void *p;
280 snd_pcm_sframes_t sframes;
281
282 /* pa_log_debug("%lu frames to read", (unsigned long) frames); */
283
284 if (PA_UNLIKELY((err = snd_pcm_mmap_begin(u->pcm_handle, &areas, &offset, &frames)) < 0)) {
285
286 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
287 continue;
288
289 return r;
290 }
291
292 /* Make sure that if these memblocks need to be copied they will fit into one slot */
293 if (frames > pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size)
294 frames = pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size;
295
296 /* Check these are multiples of 8 bit */
297 pa_assert((areas[0].first & 7) == 0);
298             pa_assert((areas[0].step & 7) == 0);
299
300 /* We assume a single interleaved memory buffer */
301 pa_assert((areas[0].first >> 3) == 0);
302 pa_assert((areas[0].step >> 3) == u->frame_size);
303
304 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
305
306 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
307 chunk.length = pa_memblock_get_length(chunk.memblock);
308 chunk.index = 0;
309
310 pa_source_post(u->source, &chunk);
311 pa_memblock_unref_fixed(chunk.memblock);
312
313 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
314
315 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
316 continue;
317
318 return r;
319 }
320
321 work_done = 1;
322
323 u->frame_index += (int64_t) frames;
324
325 /* pa_log_debug("read %lu frames", (unsigned long) frames); */
326
327 if (frames >= (snd_pcm_uframes_t) n)
328 break;
329
330 n -= (snd_pcm_sframes_t) frames;
331 }
332 }
333
334 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec) - process_usec;
335 return work_done;
336 }
337
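/* Same as mmap_read(), but using snd_pcm_readi() into freshly allocated
 * memblocks, for devices that don't support mmap access. */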
338 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec) {
339 int work_done = 0;
340 pa_usec_t max_sleep_usec = 0, process_usec = 0;
341 size_t left_to_record;
342
343 pa_assert(u);
344 pa_source_assert_ref(u->source);
345
346 if (u->use_tsched)
347 hw_sleep_time(u, &max_sleep_usec, &process_usec);
348
349 for (;;) {
350 snd_pcm_sframes_t n;
351 int r;
352
353 snd_pcm_hwsync(u->pcm_handle);
354
355 if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {
356
357 if ((r = try_recover(u, "snd_pcm_avail_update", (int) n)) == 0)
358 continue;
359
360 return r;
361 }
362
363 left_to_record = check_left_to_record(u, n);
364
365 if (u->use_tsched)
366 if (pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
367 break;
368
369 if (PA_UNLIKELY(n <= 0))
370 return work_done;
371
372 for (;;) {
373 void *p;
374 snd_pcm_sframes_t frames;
375 pa_memchunk chunk;
376
377 chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
378
379 frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
380
381 if (frames > n)
382 frames = n;
383
384 /* pa_log_debug("%lu frames to read", (unsigned long) n); */
385
386 p = pa_memblock_acquire(chunk.memblock);
387 frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
388 pa_memblock_release(chunk.memblock);
389
390 pa_assert(frames != 0);
391
392 if (PA_UNLIKELY(frames < 0)) {
393 pa_memblock_unref(chunk.memblock);
394
395 if ((r = try_recover(u, "snd_pcm_readi", (int) (frames))) == 0)
396 continue;
397
398 return r;
399 }
400
401 chunk.index = 0;
402 chunk.length = (size_t) frames * u->frame_size;
403
404 pa_source_post(u->source, &chunk);
405 pa_memblock_unref(chunk.memblock);
406
407 work_done = 1;
408
409 u->frame_index += frames;
410
411 /* pa_log_debug("read %lu frames", (unsigned long) frames); */
412
413 if (frames >= n)
414 break;
415
416 n -= frames;
417 }
418 }
419
420 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec) - process_usec;
421 return work_done;
422 }
423
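/* Feed the time smoother with the current stream position: frames posted so
 * far plus the frames still sitting in the hardware buffer. */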
424 static void update_smoother(struct userdata *u) {
425 snd_pcm_sframes_t delay = 0;
426 int64_t frames;
427 int err;
428 pa_usec_t now1, now2;
429
430 pa_assert(u);
431 pa_assert(u->pcm_handle);
432
433 /* Let's update the time smoother */
434
435 snd_pcm_hwsync(u->pcm_handle);
436 snd_pcm_avail_update(u->pcm_handle);
437
438 if (PA_UNLIKELY((err = snd_pcm_delay(u->pcm_handle, &delay)) < 0)) {
439 pa_log_warn("Failed to get delay: %s", snd_strerror(err));
440 return;
441 }
442
443 frames = u->frame_index + delay;
444
445 now1 = pa_rtclock_usec();
446 now2 = pa_bytes_to_usec((uint64_t) frames * u->frame_size, &u->source->sample_spec);
447
448 pa_smoother_put(u->smoother, now1, now2);
449 }
450
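/* Estimate the capture latency as the smoothed stream time minus the data
 * already read from the device. */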
451 static pa_usec_t source_get_latency(struct userdata *u) {
452 pa_usec_t r = 0;
453 int64_t delay;
454 pa_usec_t now1, now2;
455
456 pa_assert(u);
457
458 now1 = pa_rtclock_usec();
459 now2 = pa_smoother_get(u->smoother, now1);
460
461 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec((uint64_t) u->frame_index * u->frame_size, &u->source->sample_spec);
462
463 if (delay > 0)
464 r = (pa_usec_t) delay;
465
466 return r;
467 }
468
469 static int build_pollfd(struct userdata *u) {
470 pa_assert(u);
471 pa_assert(u->pcm_handle);
472
473 if (u->alsa_rtpoll_item)
474 pa_rtpoll_item_free(u->alsa_rtpoll_item);
475
476 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
477 return -1;
478
479 return 0;
480 }
481
482 static int suspend(struct userdata *u) {
483 pa_assert(u);
484 pa_assert(u->pcm_handle);
485
486 pa_smoother_pause(u->smoother, pa_rtclock_usec());
487
488 /* Let's suspend */
489 snd_pcm_close(u->pcm_handle);
490 u->pcm_handle = NULL;
491
492 if (u->alsa_rtpoll_item) {
493 pa_rtpoll_item_free(u->alsa_rtpoll_item);
494 u->alsa_rtpoll_item = NULL;
495 }
496
497 pa_log_info("Device suspended...");
498
499 return 0;
500 }
501
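/* Recompute how much of the hardware buffer stays unused for the currently
 * requested latency and push a matching avail_min to ALSA. */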
502 static int update_sw_params(struct userdata *u) {
503 snd_pcm_uframes_t avail_min;
504 int err;
505
506 pa_assert(u);
507
508     /* Use the full buffer if no one asked us for anything specific */
509 u->hwbuf_unused_frames = 0;
510
511 if (u->use_tsched) {
512 pa_usec_t latency;
513
514 if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
515 size_t b;
516
517 pa_log_debug("latency set to %0.2f", (double) latency / PA_USEC_PER_MSEC);
518
519 b = pa_usec_to_bytes(latency, &u->source->sample_spec);
520
521 /* We need at least one sample in our buffer */
522
523 if (PA_UNLIKELY(b < u->frame_size))
524 b = u->frame_size;
525
526 u->hwbuf_unused_frames = (snd_pcm_sframes_t)
527 (PA_LIKELY(b < u->hwbuf_size) ?
528 ((u->hwbuf_size - b) / u->frame_size) : 0);
529
530 fix_tsched_watermark(u);
531 }
532 }
533
534 pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u->hwbuf_unused_frames);
535
536 avail_min = 1;
537
538 if (u->use_tsched) {
539 pa_usec_t sleep_usec, process_usec;
540
541 hw_sleep_time(u, &sleep_usec, &process_usec);
542 avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec);
543 }
544
545 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
546
547 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
548 pa_log("Failed to set software parameters: %s", snd_strerror(err));
549 return err;
550 }
551
552 return 0;
553 }
554
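/* Reopen and reconfigure the PCM device after a suspend, verifying that the
 * original access mode, sample spec and fragment settings can be restored. */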
555 static int unsuspend(struct userdata *u) {
556 pa_sample_spec ss;
557 int err;
558 pa_bool_t b, d;
559 unsigned nfrags;
560 snd_pcm_uframes_t period_size;
561
562 pa_assert(u);
563 pa_assert(!u->pcm_handle);
564
565 pa_log_info("Trying resume...");
566
567 snd_config_update_free_global();
568 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE, SND_PCM_NONBLOCK)) < 0) {
569 pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
570 goto fail;
571 }
572
573 ss = u->source->sample_spec;
574 nfrags = u->nfragments;
575 period_size = u->fragment_size / u->frame_size;
576 b = u->use_mmap;
577 d = u->use_tsched;
578
579 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
580 pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
581 goto fail;
582 }
583
584 if (b != u->use_mmap || d != u->use_tsched) {
585 pa_log_warn("Resume failed, couldn't get original access mode.");
586 goto fail;
587 }
588
589 if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
590 pa_log_warn("Resume failed, couldn't restore original sample settings.");
591 goto fail;
592 }
593
594 if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
595 pa_log_warn("Resume failed, couldn't restore original fragment settings.");
596 goto fail;
597 }
598
599 if (update_sw_params(u) < 0)
600 goto fail;
601
602 if (build_pollfd(u) < 0)
603 goto fail;
604
605 /* FIXME: We need to reload the volume somehow */
606
607 snd_pcm_start(u->pcm_handle);
608 pa_smoother_resume(u->smoother, pa_rtclock_usec());
609
610 pa_log_info("Resumed successfully...");
611
612 return 0;
613
614 fail:
615 if (u->pcm_handle) {
616 snd_pcm_close(u->pcm_handle);
617 u->pcm_handle = NULL;
618 }
619
620 return -1;
621 }
622
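/* IO-thread message handler: answers latency queries and drives the PCM
 * device through suspend/resume/start on source state changes. */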
623 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
624 struct userdata *u = PA_SOURCE(o)->userdata;
625
626 switch (code) {
627
628 case PA_SOURCE_MESSAGE_GET_LATENCY: {
629 pa_usec_t r = 0;
630
631 if (u->pcm_handle)
632 r = source_get_latency(u);
633
634 *((pa_usec_t*) data) = r;
635
636 return 0;
637 }
638
639 case PA_SOURCE_MESSAGE_SET_STATE:
640
641 switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
642
643 case PA_SOURCE_SUSPENDED:
644 pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
645
646 if (suspend(u) < 0)
647 return -1;
648
649 break;
650
651 case PA_SOURCE_IDLE:
652 case PA_SOURCE_RUNNING:
653
654 if (u->source->thread_info.state == PA_SOURCE_INIT) {
655 if (build_pollfd(u) < 0)
656 return -1;
657
658 snd_pcm_start(u->pcm_handle);
659 }
660
661 if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
662 if (unsuspend(u) < 0)
663 return -1;
664 }
665
666 break;
667
668 case PA_SOURCE_UNLINKED:
669 case PA_SOURCE_INIT:
670 ;
671 }
672
673 break;
674 }
675
676 return pa_source_process_msg(o, code, data, offset, chunk);
677 }
678
679 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
680 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
681
682 pa_assert(u);
683 pa_assert(u->mixer_handle);
684
685 if (mask == SND_CTL_EVENT_MASK_REMOVE)
686 return 0;
687
688 if (mask & SND_CTL_EVENT_MASK_VALUE) {
689 pa_source_get_volume(u->source, TRUE);
690 pa_source_get_mute(u->source, TRUE);
691 }
692
693 return 0;
694 }
695
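/* Read the hardware capture volume, per channel if the mixer supports it and
 * preferring the dB API, and mirror it into the source. */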
696 static int source_get_volume_cb(pa_source *s) {
697 struct userdata *u = s->userdata;
698 int err;
699 unsigned i;
700 pa_cvolume r;
701 char t[PA_CVOLUME_SNPRINT_MAX];
702
703 pa_assert(u);
704 pa_assert(u->mixer_elem);
705
706 if (u->mixer_seperate_channels) {
707
708 r.channels = s->sample_spec.channels;
709
710 for (i = 0; i < s->sample_spec.channels; i++) {
711 long alsa_vol;
712
713 if (u->hw_dB_supported) {
714
715 if ((err = snd_mixer_selem_get_capture_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
716 goto fail;
717
718 #ifdef HAVE_VALGRIND_MEMCHECK_H
719 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
720 #endif
721
722 r.values[i] = pa_sw_volume_from_dB((double) alsa_vol / 100.0);
723 } else {
724
725 if ((err = snd_mixer_selem_get_capture_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
726 goto fail;
727
728 r.values[i] = (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (double) (u->hw_volume_max - u->hw_volume_min));
729 }
730 }
731
732 } else {
733 long alsa_vol;
734
735 pa_assert(u->hw_dB_supported);
736
737 if ((err = snd_mixer_selem_get_capture_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
738 goto fail;
739
740 #ifdef HAVE_VALGRIND_MEMCHECK_H
741 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
742 #endif
743
744 pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) alsa_vol / 100.0));
745 }
746
747 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
748
749 if (!pa_cvolume_equal(&u->hardware_volume, &r)) {
750
751 u->hardware_volume = s->volume = r;
752
753 if (u->hw_dB_supported) {
754 pa_cvolume reset;
755
756 /* Hmm, so the hardware volume changed, let's reset our software volume */
757
758 pa_cvolume_reset(&reset, s->sample_spec.channels);
759 pa_source_set_soft_volume(s, &reset);
760 }
761 }
762
763 return 0;
764
765 fail:
766 pa_log_error("Unable to read volume: %s", snd_strerror(err));
767
768 return -1;
769 }
770
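/* Write the requested volume to the hardware mixer, read back what was
 * actually set and, where dB information is available, compensate the
 * difference in software. */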
771 static int source_set_volume_cb(pa_source *s) {
772 struct userdata *u = s->userdata;
773 int err;
774 unsigned i;
775 pa_cvolume r;
776
777 pa_assert(u);
778 pa_assert(u->mixer_elem);
779
780 if (u->mixer_seperate_channels) {
781
782 r.channels = s->sample_spec.channels;
783
784 for (i = 0; i < s->sample_spec.channels; i++) {
785 long alsa_vol;
786 pa_volume_t vol;
787
788 vol = s->volume.values[i];
789
790 if (u->hw_dB_supported) {
791
792 alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
793 alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);
794
795 if ((err = snd_mixer_selem_set_capture_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, 1)) < 0)
796 goto fail;
797
798 if ((err = snd_mixer_selem_get_capture_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
799 goto fail;
800
801 r.values[i] = pa_sw_volume_from_dB((double) alsa_vol / 100.0);
802 } else {
803
804 alsa_vol = (long) round(((double) vol * (double) (u->hw_volume_max - u->hw_volume_min)) / PA_VOLUME_NORM) + u->hw_volume_min;
805 alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);
806
807 if ((err = snd_mixer_selem_set_capture_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
808 goto fail;
809
810 if ((err = snd_mixer_selem_get_capture_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
811 goto fail;
812
813 r.values[i] = (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (double) (u->hw_volume_max - u->hw_volume_min));
814 }
815 }
816
817 } else {
818 pa_volume_t vol;
819 long alsa_vol;
820
821 pa_assert(u->hw_dB_supported);
822
823 vol = pa_cvolume_max(&s->volume);
824
825 alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
826 alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);
827
828 if ((err = snd_mixer_selem_set_capture_dB_all(u->mixer_elem, alsa_vol, 1)) < 0)
829 goto fail;
830
831 if ((err = snd_mixer_selem_get_capture_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
832 goto fail;
833
834 pa_cvolume_set(&r, s->volume.channels, pa_sw_volume_from_dB((double) alsa_vol / 100.0));
835 }
836
837 u->hardware_volume = r;
838
839 if (u->hw_dB_supported) {
840 char t[PA_CVOLUME_SNPRINT_MAX];
841
842 /* Match exactly what the user requested by software */
843
844 pa_alsa_volume_divide(&r, &s->volume);
845 pa_source_set_soft_volume(s, &r);
846
847 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->volume));
848 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
849 pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
850
851 } else
852
853 /* We can't match exactly what the user requested, hence let's
854 * at least tell the user about it */
855
856 s->volume = r;
857
858 return 0;
859
860 fail:
861 pa_log_error("Unable to set volume: %s", snd_strerror(err));
862
863 return -1;
864 }
865
866 static int source_get_mute_cb(pa_source *s) {
867 struct userdata *u = s->userdata;
868 int err, sw;
869
870 pa_assert(u);
871 pa_assert(u->mixer_elem);
872
873 if ((err = snd_mixer_selem_get_capture_switch(u->mixer_elem, 0, &sw)) < 0) {
874 pa_log_error("Unable to get switch: %s", snd_strerror(err));
875 return -1;
876 }
877
878 s->muted = !sw;
879
880 return 0;
881 }
882
883 static int source_set_mute_cb(pa_source *s) {
884 struct userdata *u = s->userdata;
885 int err;
886
887 pa_assert(u);
888 pa_assert(u->mixer_elem);
889
890 if ((err = snd_mixer_selem_set_capture_switch_all(u->mixer_elem, !s->muted)) < 0) {
891 pa_log_error("Unable to set switch: %s", snd_strerror(err));
892 return -1;
893 }
894
895 return 0;
896 }
897
898 static void source_update_requested_latency_cb(pa_source *s) {
899 struct userdata *u = s->userdata;
900 pa_assert(u);
901
902 if (!u->pcm_handle)
903 return;
904
905 update_sw_params(u);
906 }
907
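/* The realtime IO thread: reads data from the device, updates the smoother,
 * programs the next wakeup when timer-based scheduling is used and handles
 * poll events and errors reported by ALSA. */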
908 static void thread_func(void *userdata) {
909 struct userdata *u = userdata;
910
911 pa_assert(u);
912
913 pa_log_debug("Thread starting up");
914
915 if (u->core->realtime_scheduling)
916 pa_make_realtime(u->core->realtime_priority);
917
918 pa_thread_mq_install(&u->thread_mq);
919 pa_rtpoll_install(u->rtpoll);
920
921 for (;;) {
922 int ret;
923
924 /* pa_log_debug("loop"); */
925
926 /* Read some data and pass it to the sources */
927 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
928 int work_done = 0;
929 pa_usec_t sleep_usec = 0;
930
931 if (u->use_mmap)
932 work_done = mmap_read(u, &sleep_usec);
933 else
934 work_done = unix_read(u, &sleep_usec);
935
936 if (work_done < 0)
937 goto fail;
938
939 /* pa_log_debug("work_done = %i", work_done); */
940
941 if (work_done)
942 update_smoother(u);
943
944 if (u->use_tsched) {
945 pa_usec_t cusec;
946
947 /* OK, the capture buffer is now empty, let's
948 * calculate when to wake up next */
949
950 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
951
952 /* Convert from the sound card time domain to the
953 * system time domain */
954 cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);
955
956 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
957
958 /* We don't trust the conversion, so we wake up whatever comes first */
959 pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
960 }
961 } else if (u->use_tsched)
962
963 /* OK, we're in an invalid state, let's disable our timers */
964 pa_rtpoll_set_timer_disabled(u->rtpoll);
965
966 /* Hmm, nothing to do. Let's sleep */
967 if ((ret = pa_rtpoll_run(u->rtpoll, 1)) < 0)
968 goto fail;
969
970 if (ret == 0)
971 goto finish;
972
973 /* Tell ALSA about this and process its response */
974 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
975 struct pollfd *pollfd;
976 unsigned short revents = 0;
977 int err;
978 unsigned n;
979
980 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
981
982 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
983 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
984 goto fail;
985 }
986
987 if (revents & (POLLERR|POLLNVAL|POLLHUP)) {
988 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
989 goto fail;
990
991 snd_pcm_start(u->pcm_handle);
992 }
993
994 if (revents && u->use_tsched)
995 pa_log_debug("Wakeup from ALSA! (%i)", revents);
996 }
997 }
998
999 fail:
1000     /* If this was not a regular exit from the loop we have to continue
1001      * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1002 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1003 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1004
1005 finish:
1006 pa_log_debug("Thread shutting down");
1007 }
1008
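/* Module entry point: parse the module arguments, open and configure the ALSA
 * capture device (mmap and timer-based scheduling where possible), set up the
 * mixer for hardware volume/mute control, create the source and start the IO
 * thread. */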
1009 int pa__init(pa_module*m) {
1010
1011 pa_modargs *ma = NULL;
1012 struct userdata *u = NULL;
1013 const char *dev_id;
1014 pa_sample_spec ss;
1015 pa_channel_map map;
1016 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1017 snd_pcm_uframes_t period_frames, tsched_frames;
1018 size_t frame_size;
1019 snd_pcm_info_t *pcm_info = NULL;
1020 int err;
1021 const char *name;
1022 char *name_buf = NULL;
1023 pa_bool_t namereg_fail;
1024 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d;
1025 pa_source_new_data data;
1026
1027 snd_pcm_info_alloca(&pcm_info);
1028
1029 pa_assert(m);
1030
1031 pa_alsa_redirect_errors_inc();
1032
1033 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1034 pa_log("Failed to parse module arguments");
1035 goto fail;
1036 }
1037
1038 ss = m->core->default_sample_spec;
1039 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1040 pa_log("Failed to parse sample specification");
1041 goto fail;
1042 }
1043
1044 frame_size = pa_frame_size(&ss);
1045
1046 nfrags = m->core->default_n_fragments;
1047 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1048 if (frag_size <= 0)
1049 frag_size = (uint32_t) frame_size;
1050 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1051 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1052
1053 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1054 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1055 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1056 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1057 pa_log("Failed to parse buffer metrics");
1058 goto fail;
1059 }
1060
1061 hwbuf_size = frag_size * nfrags;
1062 period_frames = frag_size/frame_size;
1063 tsched_frames = tsched_size/frame_size;
1064
1065 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1066 pa_log("Failed to parse mmap argument.");
1067 goto fail;
1068 }
1069
1070 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1071         pa_log("Failed to parse tsched argument.");
1072 goto fail;
1073 }
1074
1075 if (use_tsched && !pa_rtclock_hrtimer()) {
1076 pa_log("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1077 use_tsched = FALSE;
1078 }
1079
1080 u = pa_xnew0(struct userdata, 1);
1081 u->core = m->core;
1082 u->module = m;
1083 m->userdata = u;
1084 u->use_mmap = use_mmap;
1085 u->use_tsched = use_tsched;
1086 u->rtpoll = pa_rtpoll_new();
1087 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1088 u->alsa_rtpoll_item = NULL;
1089
1090 u->smoother = pa_smoother_new(DEFAULT_TSCHED_WATERMARK_USEC, DEFAULT_TSCHED_WATERMARK_USEC, TRUE, 5);
1091 pa_smoother_set_time_offset(u->smoother, pa_rtclock_usec());
1092
1093 snd_config_update_free_global();
1094
1095 b = use_mmap;
1096 d = use_tsched;
1097
1098 if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1099
1100 if (!(u->pcm_handle = pa_alsa_open_by_device_id(
1101 dev_id,
1102 &u->device_name,
1103 &ss, &map,
1104 SND_PCM_STREAM_CAPTURE,
1105 &nfrags, &period_frames, tsched_frames,
1106 &b, &d)))
1107 goto fail;
1108
1109 } else {
1110
1111 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1112 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1113 &u->device_name,
1114 &ss, &map,
1115 SND_PCM_STREAM_CAPTURE,
1116 &nfrags, &period_frames, tsched_frames,
1117 &b, &d)))
1118 goto fail;
1119 }
1120
1121 pa_assert(u->device_name);
1122 pa_log_info("Successfully opened device %s.", u->device_name);
1123
1124 if (use_mmap && !b) {
1125 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1126 u->use_mmap = use_mmap = FALSE;
1127 }
1128
1129 if (use_tsched && (!b || !d)) {
1130         pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1131 u->use_tsched = use_tsched = FALSE;
1132 }
1133
1134 if (u->use_mmap)
1135 pa_log_info("Successfully enabled mmap() mode.");
1136
1137 if (u->use_tsched)
1138 pa_log_info("Successfully enabled timer-based scheduling mode.");
1139
1140 if ((err = snd_pcm_info(u->pcm_handle, pcm_info)) < 0) {
1141 pa_log("Error fetching PCM info: %s", snd_strerror(err));
1142 goto fail;
1143 }
1144
1145 /* ALSA might tweak the sample spec, so recalculate the frame size */
1146 frame_size = pa_frame_size(&ss);
1147
1148 if ((err = snd_mixer_open(&u->mixer_handle, 0)) < 0)
1149 pa_log("Error opening mixer: %s", snd_strerror(err));
1150 else {
1151 pa_bool_t found = FALSE;
1152
1153 if (pa_alsa_prepare_mixer(u->mixer_handle, u->device_name) >= 0)
1154 found = TRUE;
1155 else {
1156 snd_pcm_info_t* info;
1157
1158 snd_pcm_info_alloca(&info);
1159
1160 if (snd_pcm_info(u->pcm_handle, info) >= 0) {
1161 char *md;
1162 int card;
1163
1164 if ((card = snd_pcm_info_get_card(info)) >= 0) {
1165
1166 md = pa_sprintf_malloc("hw:%i", card);
1167
1168 if (strcmp(u->device_name, md))
1169 if (pa_alsa_prepare_mixer(u->mixer_handle, md) >= 0)
1170 found = TRUE;
1171 pa_xfree(md);
1172 }
1173 }
1174 }
1175
1176 if (found)
1177 if (!(u->mixer_elem = pa_alsa_find_elem(u->mixer_handle, "Capture", "Mic")))
1178 found = FALSE;
1179
1180 if (!found) {
1181 snd_mixer_close(u->mixer_handle);
1182 u->mixer_handle = NULL;
1183 }
1184 }
1185
1186 if ((name = pa_modargs_get_value(ma, "source_name", NULL)))
1187 namereg_fail = TRUE;
1188 else {
1189 name = name_buf = pa_sprintf_malloc("alsa_input.%s", u->device_name);
1190 namereg_fail = FALSE;
1191 }
1192
1193 pa_source_new_data_init(&data);
1194 data.driver = __FILE__;
1195 data.module = m;
1196 pa_source_new_data_set_name(&data, name);
1197 data.namereg_fail = namereg_fail;
1198 pa_source_new_data_set_sample_spec(&data, &ss);
1199 pa_source_new_data_set_channel_map(&data, &map);
1200
1201 pa_alsa_init_proplist(data.proplist, pcm_info);
1202 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1203 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1204 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1205 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1206
1207 u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY);
1208 pa_source_new_data_done(&data);
1209 pa_xfree(name_buf);
1210
1211 if (!u->source) {
1212 pa_log("Failed to create source object");
1213 goto fail;
1214 }
1215
1216 u->source->parent.process_msg = source_process_msg;
1217 u->source->update_requested_latency = source_update_requested_latency_cb;
1218 u->source->userdata = u;
1219
1220 pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1221 pa_source_set_rtpoll(u->source, u->rtpoll);
1222
1223 u->frame_size = frame_size;
1224 u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
1225 u->nfragments = nfrags;
1226 u->hwbuf_size = u->fragment_size * nfrags;
1227 u->hwbuf_unused_frames = 0;
1228 u->tsched_watermark = tsched_watermark;
1229 u->frame_index = 0;
1230 u->hw_dB_supported = FALSE;
1231 u->hw_dB_min = u->hw_dB_max = 0;
1232 u->hw_volume_min = u->hw_volume_max = 0;
1233 u->mixer_seperate_channels = FALSE;
1234 pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1235
1236 if (use_tsched)
1237 fix_tsched_watermark(u);
1238
1239 pa_source_set_latency_range(u->source,
1240 !use_tsched ? pa_bytes_to_usec(u->hwbuf_size, &ss) : (pa_usec_t) -1,
1241 pa_bytes_to_usec(u->hwbuf_size, &ss));
1242
1243 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1244 nfrags, (long unsigned) u->fragment_size,
1245 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1246
1247 if (use_tsched)
1248 pa_log_info("Time scheduling watermark is %0.2fms",
1249 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1250
1251 if (update_sw_params(u) < 0)
1252 goto fail;
1253
1254 if (u->mixer_handle) {
1255 pa_assert(u->mixer_elem);
1256
1257 if (snd_mixer_selem_has_capture_volume(u->mixer_elem)) {
1258 pa_bool_t suitable = TRUE;
1259
1260 if (snd_mixer_selem_get_capture_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) < 0) {
1261 pa_log_info("Failed to get volume range. Falling back to software volume control.");
1262 suitable = FALSE;
1263 } else {
1264 pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
1265 pa_assert(u->hw_volume_min < u->hw_volume_max);
1266 }
1267
1268 if (snd_mixer_selem_get_capture_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) < 0)
1269 pa_log_info("Mixer doesn't support dB information.");
1270 else {
1271 #ifdef HAVE_VALGRIND_MEMCHECK_H
1272 VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_min, sizeof(u->hw_dB_min));
1273 VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_max, sizeof(u->hw_dB_max));
1274 #endif
1275
1276 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
1277 pa_assert(u->hw_dB_min < u->hw_dB_max);
1278 u->hw_dB_supported = TRUE;
1279 }
1280
1281 if (suitable &&
1282 !u->hw_dB_supported &&
1283 u->hw_volume_max - u->hw_volume_min < 3) {
1284
1285                 pa_log_info("Device has fewer than 4 volume levels. Falling back to software volume control.");
1286 suitable = FALSE;
1287 }
1288
1289
1290 if (suitable) {
1291 u->mixer_seperate_channels = pa_alsa_calc_mixer_map(u->mixer_elem, &map, u->mixer_map, FALSE) >= 0;
1292
1293 u->source->get_volume = source_get_volume_cb;
1294 u->source->set_volume = source_set_volume_cb;
1295 u->source->flags |= PA_SOURCE_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SOURCE_DECIBEL_VOLUME : 0);
1296 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");
1297 } else
1298 pa_log_info("Using software volume control.");
1299 }
1300
1301 if (snd_mixer_selem_has_capture_switch(u->mixer_elem)) {
1302 u->source->get_mute = source_get_mute_cb;
1303 u->source->set_mute = source_set_mute_cb;
1304 u->source->flags |= PA_SOURCE_HW_MUTE_CTRL;
1305 } else
1306 pa_log_info("Using software mute control.");
1307
1308 u->mixer_fdl = pa_alsa_fdlist_new();
1309
1310 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, m->core->mainloop) < 0) {
1311 pa_log("Failed to initialize file descriptor monitoring");
1312 goto fail;
1313 }
1314
1315 snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
1316 snd_mixer_elem_set_callback_private(u->mixer_elem, u);
1317 } else
1318 u->mixer_fdl = NULL;
1319
1320 pa_alsa_dump(u->pcm_handle);
1321
1322 if (!(u->thread = pa_thread_new(thread_func, u))) {
1323 pa_log("Failed to create thread.");
1324 goto fail;
1325 }
1326 /* Get initial mixer settings */
1327 if (data.volume_is_set) {
1328 if (u->source->set_volume)
1329 u->source->set_volume(u->source);
1330 } else {
1331 if (u->source->get_volume)
1332 u->source->get_volume(u->source);
1333 }
1334
1335 if (data.muted_is_set) {
1336 if (u->source->set_mute)
1337 u->source->set_mute(u->source);
1338 } else {
1339 if (u->source->get_mute)
1340 u->source->get_mute(u->source);
1341 }
1342
1343 pa_source_put(u->source);
1344
1345 pa_modargs_free(ma);
1346
1347 return 0;
1348
1349 fail:
1350
1351 if (ma)
1352 pa_modargs_free(ma);
1353
1354 pa__done(m);
1355
1356 return -1;
1357 }
1358
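/* Module teardown: unlink the source, shut down the IO thread and release all
 * ALSA and PulseAudio resources in reverse order of allocation. */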
1359 void pa__done(pa_module*m) {
1360 struct userdata *u;
1361
1362 pa_assert(m);
1363
1364 if (!(u = m->userdata)) {
1365 pa_alsa_redirect_errors_dec();
1366 return;
1367 }
1368
1369 if (u->source)
1370 pa_source_unlink(u->source);
1371
1372 if (u->thread) {
1373 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1374 pa_thread_free(u->thread);
1375 }
1376
1377 pa_thread_mq_done(&u->thread_mq);
1378
1379 if (u->source)
1380 pa_source_unref(u->source);
1381
1382 if (u->alsa_rtpoll_item)
1383 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1384
1385 if (u->rtpoll)
1386 pa_rtpoll_free(u->rtpoll);
1387
1388 if (u->mixer_fdl)
1389 pa_alsa_fdlist_free(u->mixer_fdl);
1390
1391 if (u->mixer_handle)
1392 snd_mixer_close(u->mixer_handle);
1393
1394 if (u->pcm_handle) {
1395 snd_pcm_drop(u->pcm_handle);
1396 snd_pcm_close(u->pcm_handle);
1397 }
1398
1399 if (u->smoother)
1400 pa_smoother_free(u->smoother);
1401
1402 pa_xfree(u->device_name);
1403 pa_xfree(u);
1404
1405 snd_config_update_free_global();
1406 pa_alsa_redirect_errors_dec();
1407 }