pulseaudio: src/modules/alsa/alsa-source.c
source: rework volume handling
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/i18n.h>
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core-error.h>
38 #include <pulsecore/core.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/time-smoother.h>
53
54 #include <modules/reserve-wrap.h>
55
56 #include "alsa-util.h"
57 #include "alsa-source.h"
58
59 /* #define DEBUG_TIMING */
60
61 #define DEFAULT_DEVICE "default"
62 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
63 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
64 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
65 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
66 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
67
68 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
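/* VOLUME_ACCURACY is 1% of PA_VOLUME_NORM. source_set_volume_cb() below uses
 * it to decide whether the residual software volume (the requested volume
 * divided by what the hardware mixer actually applied) is close enough to
 * 1.0 that the extra software scaling step can be skipped. */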
69
70 struct userdata {
71 pa_core *core;
72 pa_module *module;
73 pa_source *source;
74
75 pa_thread *thread;
76 pa_thread_mq thread_mq;
77 pa_rtpoll *rtpoll;
78
79 snd_pcm_t *pcm_handle;
80
81 pa_alsa_fdlist *mixer_fdl;
82 snd_mixer_t *mixer_handle;
83 pa_alsa_path_set *mixer_path_set;
84 pa_alsa_path *mixer_path;
85
86 pa_cvolume hardware_volume;
87
88 size_t
89 frame_size,
90 fragment_size,
91 hwbuf_size,
92 tsched_watermark,
93 hwbuf_unused,
94 min_sleep,
95 min_wakeup,
96 watermark_step;
97
98 unsigned nfragments;
99
100 char *device_name;
101 char *control_device;
102
103 pa_bool_t use_mmap:1, use_tsched:1;
104
105 pa_rtpoll_item *alsa_rtpoll_item;
106
107 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
108
109 pa_smoother *smoother;
110 uint64_t read_count;
111
112 pa_reserve_wrapper *reserve;
113 pa_hook_slot *reserve_slot;
114 pa_reserve_monitor_wrapper *monitor;
115 pa_hook_slot *monitor_slot;
116 };
117
118 static void userdata_free(struct userdata *u);
119
120 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
121 pa_assert(r);
122 pa_assert(u);
123
124 if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
125 return PA_HOOK_CANCEL;
126
127 return PA_HOOK_OK;
128 }
129
130 static void reserve_done(struct userdata *u) {
131 pa_assert(u);
132
133 if (u->reserve_slot) {
134 pa_hook_slot_free(u->reserve_slot);
135 u->reserve_slot = NULL;
136 }
137
138 if (u->reserve) {
139 pa_reserve_wrapper_unref(u->reserve);
140 u->reserve = NULL;
141 }
142 }
143
144 static void reserve_update(struct userdata *u) {
145 const char *description;
146 pa_assert(u);
147
148 if (!u->source || !u->reserve)
149 return;
150
151 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
152 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
153 }
154
155 static int reserve_init(struct userdata *u, const char *dname) {
156 char *rname;
157
158 pa_assert(u);
159 pa_assert(dname);
160
161 if (u->reserve)
162 return 0;
163
164 if (pa_in_system_mode())
165 return 0;
166
167 /* We are resuming, try to lock the device */
168 if (!(rname = pa_alsa_get_reserve_name(dname)))
169 return 0;
170
171 u->reserve = pa_reserve_wrapper_get(u->core, rname);
172 pa_xfree(rname);
173
174 if (!(u->reserve))
175 return -1;
176
177 reserve_update(u);
178
179 pa_assert(!u->reserve_slot);
180 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
181
182 return 0;
183 }
184
185 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
186 pa_bool_t b;
187
188 pa_assert(w);
189 pa_assert(u);
190
191 b = PA_PTR_TO_UINT(busy) && !u->reserve;
192
193 pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
194 return PA_HOOK_OK;
195 }
196
197 static void monitor_done(struct userdata *u) {
198 pa_assert(u);
199
200 if (u->monitor_slot) {
201 pa_hook_slot_free(u->monitor_slot);
202 u->monitor_slot = NULL;
203 }
204
205 if (u->monitor) {
206 pa_reserve_monitor_wrapper_unref(u->monitor);
207 u->monitor = NULL;
208 }
209 }
210
211 static int reserve_monitor_init(struct userdata *u, const char *dname) {
212 char *rname;
213
214 pa_assert(u);
215 pa_assert(dname);
216
217 if (pa_in_system_mode())
218 return 0;
219
220 /* Figure out the reservation name so we can watch whether someone else holds the device */
221 if (!(rname = pa_alsa_get_reserve_name(dname)))
222 return 0;
223
224 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
225 pa_xfree(rname);
226
227 if (!(u->monitor))
228 return -1;
229
230 pa_assert(!u->monitor_slot);
231 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
232
233 return 0;
234 }
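/* The two reservation helpers above serve different purposes: u->reserve is
 * our own claim on the ALSA device, and reserve_cb() suspends the source if
 * another application forcibly takes that claim away. u->monitor merely
 * watches whether someone else currently holds the device; monitor_cb()
 * suspends us while it is busy and we hold no reservation ourselves. */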
235
236 static void fix_min_sleep_wakeup(struct userdata *u) {
237 size_t max_use, max_use_2;
238 pa_assert(u);
239
240 max_use = u->hwbuf_size - u->hwbuf_unused;
241 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
242
243 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
244 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
245
246 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
247 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
248 }
249
250 static void fix_tsched_watermark(struct userdata *u) {
251 size_t max_use;
252 pa_assert(u);
253
254 max_use = u->hwbuf_size - u->hwbuf_unused;
255
256 if (u->tsched_watermark > max_use - u->min_sleep)
257 u->tsched_watermark = max_use - u->min_sleep;
258
259 if (u->tsched_watermark < u->min_wakeup)
260 u->tsched_watermark = u->min_wakeup;
261 }
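/* Taken together, fix_min_sleep_wakeup() and fix_tsched_watermark() maintain
 * the invariant min_wakeup <= tsched_watermark <= max_use - min_sleep, i.e.
 * the wakeup watermark always leaves room both to sleep and to wake up in
 * time within the usable part of the hardware buffer. */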
262
263 static void adjust_after_overrun(struct userdata *u) {
264 size_t old_watermark;
265 pa_usec_t old_min_latency, new_min_latency;
266
267 pa_assert(u);
268 pa_assert(u->use_tsched);
269
270 /* First, just try to increase the watermark */
271 old_watermark = u->tsched_watermark;
272 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
273
274 fix_tsched_watermark(u);
275
276 if (old_watermark != u->tsched_watermark) {
277 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
278 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
279 return;
280 }
281
282 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
283 old_min_latency = u->source->thread_info.min_latency;
284 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
285 new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
286
287 if (old_min_latency != new_min_latency) {
288 pa_log_notice("Increasing minimal latency to %0.2f ms",
289 (double) new_min_latency / PA_USEC_PER_MSEC);
290
291 pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
292 return;
293 }
294
295 /* When we reach this point we're officially out of options. */
296 }
297
298 static pa_usec_t hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
299 pa_usec_t wm, usec;
300
301 pa_assert(u);
302
303 usec = pa_source_get_requested_latency_within_thread(u->source);
304
305 if (usec == (pa_usec_t) -1)
306 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
307
308 wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
309
310 if (wm > usec)
311 wm = usec/2;
312
313 *sleep_usec = usec - wm;
314 *process_usec = wm;
315
316 #ifdef DEBUG_TIMING
317 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
318 (unsigned long) (usec / PA_USEC_PER_MSEC),
319 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
320 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
321 #endif
322
323 return usec;
324 }
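/* hw_sleep_time() splits the requested latency (or the full buffer time if
 * nothing was requested) into a sleep period and a processing margin equal
 * to the watermark. For example, a requested latency of 10 ms with a 20 ms
 * watermark caps wm at 5 ms, yielding *sleep_usec = 5 ms and
 * *process_usec = 5 ms. */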
325
326 static int try_recover(struct userdata *u, const char *call, int err) {
327 pa_assert(u);
328 pa_assert(call);
329 pa_assert(err < 0);
330
331 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
332
333 pa_assert(err != -EAGAIN);
334
335 if (err == -EPIPE)
336 pa_log_debug("%s: Buffer overrun!", call);
337
338 if (err == -ESTRPIPE)
339 pa_log_debug("%s: System suspended!", call);
340
341 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
342 pa_log("%s: %s", call, pa_alsa_strerror(err));
343 return -1;
344 }
345
346 snd_pcm_start(u->pcm_handle);
347 return 0;
348 }
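/* snd_pcm_recover() transparently handles -EPIPE (overrun) and -ESTRPIPE
 * (system suspend). Afterwards the PCM is restarted explicitly, because a
 * capture stream only begins filling its buffer again after
 * snd_pcm_start(). */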
349
350 static size_t check_left_to_record(struct userdata *u, size_t n_bytes) {
351 size_t left_to_record;
352 size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
353
354 /* We use <= instead of < for this check here because an overrun
355 * only happens after the last sample was processed, not already when
356 * it is removed from the buffer. This is particularly important
357 * when block transfer is used. */
358
359 if (n_bytes <= rec_space) {
360 left_to_record = rec_space - n_bytes;
361
362 #ifdef DEBUG_TIMING
363 pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
364 #endif
365
366 } else {
367 left_to_record = 0;
368
369 #ifdef DEBUG_TIMING
370 PA_DEBUG_TRAP;
371 #endif
372
373 if (pa_log_ratelimit())
374 pa_log_info("Overrun!");
375
376 if (u->use_tsched)
377 adjust_after_overrun(u);
378 }
379
380 return left_to_record;
381 }
382
383 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
384 pa_bool_t work_done = FALSE;
385 pa_usec_t max_sleep_usec = 0, process_usec = 0;
386 size_t left_to_record;
387 unsigned j = 0;
388
389 pa_assert(u);
390 pa_source_assert_ref(u->source);
391
392 if (u->use_tsched)
393 hw_sleep_time(u, &max_sleep_usec, &process_usec);
394
395 for (;;) {
396 snd_pcm_sframes_t n;
397 size_t n_bytes;
398 int r;
399 pa_bool_t after_avail = TRUE;
400
401 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
402
403 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
404 continue;
405
406 return r;
407 }
408
409 n_bytes = (size_t) n * u->frame_size;
410
411 #ifdef DEBUG_TIMING
412 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
413 #endif
414
415 left_to_record = check_left_to_record(u, n_bytes);
416
417 if (u->use_tsched)
418 if (!polled &&
419 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
420 #ifdef DEBUG_TIMING
421 pa_log_debug("Not reading, because too early.");
422 #endif
423 break;
424 }
425
426 if (PA_UNLIKELY(n_bytes <= 0)) {
427
428 if (polled)
429 PA_ONCE_BEGIN {
430 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
431 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
432 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
433 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
434 pa_strnull(dn));
435 pa_xfree(dn);
436 } PA_ONCE_END;
437
438 #ifdef DEBUG_TIMING
439 pa_log_debug("Not reading, because not necessary.");
440 #endif
441 break;
442 }
443
444 if (++j > 10) {
445 #ifdef DEBUG_TIMING
446 pa_log_debug("Not filling up, because already too many iterations.");
447 #endif
448
449 break;
450 }
451
452 polled = FALSE;
453
454 #ifdef DEBUG_TIMING
455 pa_log_debug("Reading");
456 #endif
457
458 for (;;) {
459 int err;
460 const snd_pcm_channel_area_t *areas;
461 snd_pcm_uframes_t offset, frames;
462 pa_memchunk chunk;
463 void *p;
464 snd_pcm_sframes_t sframes;
465
466 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
467
468 /* pa_log_debug("%lu frames to read", (unsigned long) frames); */
469
470 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
471
472 if (!after_avail && err == -EAGAIN)
473 break;
474
475 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
476 continue;
477
478 return r;
479 }
480
481 /* Make sure that if these memblocks need to be copied they will fit into one slot */
482 if (frames > pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size)
483 frames = pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size;
484
485 if (!after_avail && frames == 0)
486 break;
487
488 pa_assert(frames > 0);
489 after_avail = FALSE;
490
491 /* Check these are multiples of 8 bit */
492 pa_assert((areas[0].first & 7) == 0);
493 pa_assert((areas[0].step & 7) == 0);
494
495 /* We assume a single interleaved memory buffer */
496 pa_assert((areas[0].first >> 3) == 0);
497 pa_assert((areas[0].step >> 3) == u->frame_size);
498
499 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
500
501 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
502 chunk.length = pa_memblock_get_length(chunk.memblock);
503 chunk.index = 0;
504
505 pa_source_post(u->source, &chunk);
506 pa_memblock_unref_fixed(chunk.memblock);
507
508 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
509
510 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
511 continue;
512
513 return r;
514 }
515
516 work_done = TRUE;
517
518 u->read_count += frames * u->frame_size;
519
520 #ifdef DEBUG_TIMING
521 pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
522 #endif
523
524 if ((size_t) frames * u->frame_size >= n_bytes)
525 break;
526
527 n_bytes -= (size_t) frames * u->frame_size;
528 }
529 }
530
531 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
532
533 if (*sleep_usec > process_usec)
534 *sleep_usec -= process_usec;
535 else
536 *sleep_usec = 0;
537
538 return work_done ? 1 : 0;
539 }
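/* Summary of the mmap capture path above: query how much data is available,
 * map the hardware buffer with snd_pcm_mmap_begin(), wrap the mapped area in
 * a fixed (non-copying) memblock, post it to the source and release it with
 * snd_pcm_mmap_commit(). The iteration cap (j > 10) bounds how long a single
 * wakeup may spend reading. */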
540
541 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
542 int work_done = FALSE;
543 pa_usec_t max_sleep_usec = 0, process_usec = 0;
544 size_t left_to_record;
545 unsigned j = 0;
546
547 pa_assert(u);
548 pa_source_assert_ref(u->source);
549
550 if (u->use_tsched)
551 hw_sleep_time(u, &max_sleep_usec, &process_usec);
552
553 for (;;) {
554 snd_pcm_sframes_t n;
555 size_t n_bytes;
556 int r;
557 pa_bool_t after_avail = TRUE;
558
559 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
560
561 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
562 continue;
563
564 return r;
565 }
566
567 n_bytes = (size_t) n * u->frame_size;
568 left_to_record = check_left_to_record(u, n_bytes);
569
570 if (u->use_tsched)
571 if (!polled &&
572 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
573 break;
574
575 if (PA_UNLIKELY(n_bytes <= 0)) {
576
577 if (polled)
578 PA_ONCE_BEGIN {
579 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
580 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
581 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
582 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
583 pa_strnull(dn));
584 pa_xfree(dn);
585 } PA_ONCE_END;
586
587 break;
588 }
589
590 if (++j > 10) {
591 #ifdef DEBUG_TIMING
592 pa_log_debug("Not filling up, because already too many iterations.");
593 #endif
594
595 break;
596 }
597
598 polled = FALSE;
599
600 for (;;) {
601 void *p;
602 snd_pcm_sframes_t frames;
603 pa_memchunk chunk;
604
605 chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
606
607 frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
608
609 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
610 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
611
612 /* pa_log_debug("%lu frames to read", (unsigned long) n); */
613
614 p = pa_memblock_acquire(chunk.memblock);
615 frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
616 pa_memblock_release(chunk.memblock);
617
618 if (PA_UNLIKELY(frames < 0)) {
619 pa_memblock_unref(chunk.memblock);
620
621 if (!after_avail && (int) frames == -EAGAIN)
622 break;
623
624 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
625 continue;
626
627 return r;
628 }
629
630 if (!after_avail && frames == 0) {
631 pa_memblock_unref(chunk.memblock);
632 break;
633 }
634
635 pa_assert(frames > 0);
636 after_avail = FALSE;
637
638 chunk.index = 0;
639 chunk.length = (size_t) frames * u->frame_size;
640
641 pa_source_post(u->source, &chunk);
642 pa_memblock_unref(chunk.memblock);
643
644 work_done = TRUE;
645
646 u->read_count += frames * u->frame_size;
647
648 /* pa_log_debug("read %lu frames", (unsigned long) frames); */
649
650 if ((size_t) frames * u->frame_size >= n_bytes)
651 break;
652
653 n_bytes -= (size_t) frames * u->frame_size;
654 }
655 }
656
657 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
658
659 if (*sleep_usec > process_usec)
660 *sleep_usec -= process_usec;
661 else
662 *sleep_usec = 0;
663
664 return work_done ? 1 : 0;
665 }
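/* unix_read() is the fallback for devices without mmap support: it allocates
 * a fresh memblock and fills it with snd_pcm_readi(), i.e. one extra copy
 * compared to the zero-copy mmap path above. */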
666
667 static void update_smoother(struct userdata *u) {
668 snd_pcm_sframes_t delay = 0;
669 uint64_t position;
670 int err;
671 pa_usec_t now1 = 0, now2;
672 snd_pcm_status_t *status;
673
674 snd_pcm_status_alloca(&status);
675
676 pa_assert(u);
677 pa_assert(u->pcm_handle);
678
679 /* Let's update the time smoother */
680
681 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
682 pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
683 return;
684 }
685
686 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
687 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
688 else {
689 snd_htimestamp_t htstamp = { 0, 0 };
690 snd_pcm_status_get_htstamp(status, &htstamp);
691 now1 = pa_timespec_load(&htstamp);
692 }
693
694 position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
695
696 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
697 if (now1 <= 0)
698 now1 = pa_rtclock_now();
699
700 now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
701
702 pa_smoother_put(u->smoother, now1, now2);
703 }
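/* update_smoother() feeds the time smoother with pairs of (system time taken
 * from the PCM htimestamp, or the rtclock if none is available) and (stream
 * time derived from read_count plus the data still sitting in the hardware
 * buffer), so the two clock domains can later be translated into each
 * other. */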
704
705 static pa_usec_t source_get_latency(struct userdata *u) {
706 int64_t delay;
707 pa_usec_t now1, now2;
708
709 pa_assert(u);
710
711 now1 = pa_rtclock_now();
712 now2 = pa_smoother_get(u->smoother, now1);
713
714 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
715
716 return delay >= 0 ? (pa_usec_t) delay : 0;
717 }
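/* The source latency is the smoothed estimate of how much audio the device
 * has captured so far minus what we have already posted, i.e. how much
 * captured data is still waiting in the hardware buffer, expressed as
 * time. */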
718
719 static int build_pollfd(struct userdata *u) {
720 pa_assert(u);
721 pa_assert(u->pcm_handle);
722
723 if (u->alsa_rtpoll_item)
724 pa_rtpoll_item_free(u->alsa_rtpoll_item);
725
726 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
727 return -1;
728
729 return 0;
730 }
731
732 static int suspend(struct userdata *u) {
733 pa_assert(u);
734 pa_assert(u->pcm_handle);
735
736 pa_smoother_pause(u->smoother, pa_rtclock_now());
737
738 /* Let's suspend */
739 snd_pcm_close(u->pcm_handle);
740 u->pcm_handle = NULL;
741
742 if (u->alsa_rtpoll_item) {
743 pa_rtpoll_item_free(u->alsa_rtpoll_item);
744 u->alsa_rtpoll_item = NULL;
745 }
746
747 pa_log_info("Device suspended...");
748
749 return 0;
750 }
751
752 static int update_sw_params(struct userdata *u) {
753 snd_pcm_uframes_t avail_min;
754 int err;
755
756 pa_assert(u);
757
758 /* Use the full buffer if no one asked us for anything specific */
759 u->hwbuf_unused = 0;
760
761 if (u->use_tsched) {
762 pa_usec_t latency;
763
764 if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
765 size_t b;
766
767 pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
768
769 b = pa_usec_to_bytes(latency, &u->source->sample_spec);
770
771 /* We need at least one sample in our buffer */
772
773 if (PA_UNLIKELY(b < u->frame_size))
774 b = u->frame_size;
775
776 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
777 }
778
779 fix_min_sleep_wakeup(u);
780 fix_tsched_watermark(u);
781 }
782
783 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
784
785 avail_min = 1;
786
787 if (u->use_tsched) {
788 pa_usec_t sleep_usec, process_usec;
789
790 hw_sleep_time(u, &sleep_usec, &process_usec);
791 avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
792 }
793
794 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
795
796 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
797 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
798 return err;
799 }
800
801 return 0;
802 }
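/* update_sw_params() leaves hwbuf_unused bytes of the hardware buffer
 * deliberately unused when a latency smaller than the full buffer was
 * requested, and in timer-based mode raises avail_min so that POLLIN from
 * the kernel acts only as a fallback; the regular wakeups come from the
 * rtpoll timer programmed in thread_func(). */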
803
804 static int unsuspend(struct userdata *u) {
805 pa_sample_spec ss;
806 int err;
807 pa_bool_t b, d;
808 unsigned nfrags;
809 snd_pcm_uframes_t period_size;
810
811 pa_assert(u);
812 pa_assert(!u->pcm_handle);
813
814 pa_log_info("Trying resume...");
815
816 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
817 /*SND_PCM_NONBLOCK|*/
818 SND_PCM_NO_AUTO_RESAMPLE|
819 SND_PCM_NO_AUTO_CHANNELS|
820 SND_PCM_NO_AUTO_FORMAT)) < 0) {
821 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
822 goto fail;
823 }
824
825 ss = u->source->sample_spec;
826 nfrags = u->nfragments;
827 period_size = u->fragment_size / u->frame_size;
828 b = u->use_mmap;
829 d = u->use_tsched;
830
831 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
832 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
833 goto fail;
834 }
835
836 if (b != u->use_mmap || d != u->use_tsched) {
837 pa_log_warn("Resume failed, couldn't get original access mode.");
838 goto fail;
839 }
840
841 if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
842 pa_log_warn("Resume failed, couldn't restore original sample settings.");
843 goto fail;
844 }
845
846 if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
847 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
848 (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
849 (unsigned long) nfrags, period_size * u->frame_size);
850 goto fail;
851 }
852
853 if (update_sw_params(u) < 0)
854 goto fail;
855
856 if (build_pollfd(u) < 0)
857 goto fail;
858
859 /* FIXME: We need to reload the volume somehow */
860
861 snd_pcm_start(u->pcm_handle);
862
863 u->read_count = 0;
864 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
865
866 pa_log_info("Resumed successfully...");
867
868 return 0;
869
870 fail:
871 if (u->pcm_handle) {
872 snd_pcm_close(u->pcm_handle);
873 u->pcm_handle = NULL;
874 }
875
876 return -1;
877 }
878
879 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
880 struct userdata *u = PA_SOURCE(o)->userdata;
881
882 switch (code) {
883
884 case PA_SOURCE_MESSAGE_GET_LATENCY: {
885 pa_usec_t r = 0;
886
887 if (u->pcm_handle)
888 r = source_get_latency(u);
889
890 *((pa_usec_t*) data) = r;
891
892 return 0;
893 }
894
895 case PA_SOURCE_MESSAGE_SET_STATE:
896
897 switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
898
899 case PA_SOURCE_SUSPENDED:
900 pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
901
902 if (suspend(u) < 0)
903 return -1;
904
905 break;
906
907 case PA_SOURCE_IDLE:
908 case PA_SOURCE_RUNNING:
909
910 if (u->source->thread_info.state == PA_SOURCE_INIT) {
911 if (build_pollfd(u) < 0)
912 return -1;
913
914 snd_pcm_start(u->pcm_handle);
915 }
916
917 if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
918 if (unsuspend(u) < 0)
919 return -1;
920 }
921
922 break;
923
924 case PA_SOURCE_UNLINKED:
925 case PA_SOURCE_INIT:
926 case PA_SOURCE_INVALID_STATE:
927 ;
928 }
929
930 break;
931 }
932
933 return pa_source_process_msg(o, code, data, offset, chunk);
934 }
935
936 /* Called from main context */
937 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
938 pa_source_state_t old_state;
939 struct userdata *u;
940
941 pa_source_assert_ref(s);
942 pa_assert_se(u = s->userdata);
943
944 old_state = pa_source_get_state(u->source);
945
946 if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
947 reserve_done(u);
948 else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
949 if (reserve_init(u, u->device_name) < 0)
950 return -1;
951
952 return 0;
953 }
954
955 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
956 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
957
958 pa_assert(u);
959 pa_assert(u->mixer_handle);
960
961 if (mask == SND_CTL_EVENT_MASK_REMOVE)
962 return 0;
963
964 if (mask & SND_CTL_EVENT_MASK_VALUE) {
965 pa_source_get_volume(u->source, TRUE);
966 pa_source_get_mute(u->source, TRUE);
967 }
968
969 return 0;
970 }
971
972 static void source_get_volume_cb(pa_source *s) {
973 struct userdata *u = s->userdata;
974 pa_cvolume r;
975 char t[PA_CVOLUME_SNPRINT_MAX];
976
977 pa_assert(u);
978 pa_assert(u->mixer_path);
979 pa_assert(u->mixer_handle);
980
981 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
982 return;
983
984 /* Shift down by the base volume, so that 0dB becomes maximum volume */
985 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
986
987 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
988
989 if (pa_cvolume_equal(&u->hardware_volume, &r))
990 return;
991
992 s->volume = u->hardware_volume = r;
993
994 /* Hmm, so the hardware volume changed, let's reset our software volume */
995 if (u->mixer_path->has_dB)
996 pa_source_set_soft_volume(s, NULL);
997 }
998
999 static void source_set_volume_cb(pa_source *s) {
1000 struct userdata *u = s->userdata;
1001 pa_cvolume r;
1002 char t[PA_CVOLUME_SNPRINT_MAX];
1003
1004 pa_assert(u);
1005 pa_assert(u->mixer_path);
1006 pa_assert(u->mixer_handle);
1007
1008 /* Shift up by the base volume */
1009 pa_sw_cvolume_divide_scalar(&r, &s->volume, s->base_volume);
1010
1011 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1012 return;
1013
1014 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1015 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1016
1017 u->hardware_volume = r;
1018
1019 if (u->mixer_path->has_dB) {
1020 pa_cvolume new_soft_volume;
1021 pa_bool_t accurate_enough;
1022
1023 /* Match exactly what the user requested by software */
1024 pa_sw_cvolume_divide(&new_soft_volume, &s->volume, &u->hardware_volume);
1025
1026 /* If the adjustment to do in software is only minimal we
1027 * can skip it. That saves us CPU at the expense of a bit of
1028 * accuracy */
1029 accurate_enough =
1030 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1031 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1032
1033 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->volume));
1034 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
1035 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
1036 pa_yes_no(accurate_enough));
1037
1038 if (!accurate_enough)
1039 s->soft_volume = new_soft_volume;
1040
1041 } else {
1042 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1043
1044 /* We can't match exactly what the user requested, hence let's
1045 * at least tell the user about it */
1046
1047 s->volume = r;
1048 }
1049 }
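/* Volume handling splits the request between hardware and software: the
 * hardware mixer is set as close to the request as its steps allow, and the
 * remainder becomes the soft volume (only when dB information is available).
 * E.g. a request of 0.55*PA_VOLUME_NORM that the hardware rounds to
 * 0.60*PA_VOLUME_NORM leaves a residual soft volume of roughly
 * 0.92*PA_VOLUME_NORM, which deviates from PA_VOLUME_NORM by more than
 * VOLUME_ACCURACY and is therefore kept. */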
1050
1051 static void source_get_mute_cb(pa_source *s) {
1052 struct userdata *u = s->userdata;
1053 pa_bool_t b;
1054
1055 pa_assert(u);
1056 pa_assert(u->mixer_path);
1057 pa_assert(u->mixer_handle);
1058
1059 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1060 return;
1061
1062 s->muted = b;
1063 }
1064
1065 static void source_set_mute_cb(pa_source *s) {
1066 struct userdata *u = s->userdata;
1067
1068 pa_assert(u);
1069 pa_assert(u->mixer_path);
1070 pa_assert(u->mixer_handle);
1071
1072 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1073 }
1074
1075 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1076 struct userdata *u = s->userdata;
1077 pa_alsa_port_data *data;
1078
1079 pa_assert(u);
1080 pa_assert(p);
1081 pa_assert(u->mixer_handle);
1082
1083 data = PA_DEVICE_PORT_DATA(p);
1084
1085 pa_assert_se(u->mixer_path = data->path);
1086 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1087
1088 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1089 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1090 s->n_volume_steps = PA_VOLUME_NORM+1;
1091
1092 if (u->mixer_path->max_dB > 0.0)
1093 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1094 else
1095 pa_log_info("No particular base volume set, fixing to 0 dB");
1096 } else {
1097 s->base_volume = PA_VOLUME_NORM;
1098 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1099 }
1100
1101 if (data->setting)
1102 pa_alsa_setting_select(data->setting, u->mixer_handle);
1103
1104 if (s->set_mute)
1105 s->set_mute(s);
1106 if (s->set_volume)
1107 s->set_volume(s);
1108
1109 return 0;
1110 }
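/* Switching ports re-selects the corresponding mixer path, recomputes
 * base_volume and n_volume_steps the same way setup_mixer() does below, and
 * then pushes the currently configured mute and volume out to the newly
 * selected path. */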
1111
1112 static void source_update_requested_latency_cb(pa_source *s) {
1113 struct userdata *u = s->userdata;
1114 pa_assert(u);
1115
1116 if (!u->pcm_handle)
1117 return;
1118
1119 update_sw_params(u);
1120 }
1121
1122 static void thread_func(void *userdata) {
1123 struct userdata *u = userdata;
1124 unsigned short revents = 0;
1125
1126 pa_assert(u);
1127
1128 pa_log_debug("Thread starting up");
1129
1130 if (u->core->realtime_scheduling)
1131 pa_make_realtime(u->core->realtime_priority);
1132
1133 pa_thread_mq_install(&u->thread_mq);
1134
1135 for (;;) {
1136 int ret;
1137
1138 #ifdef DEBUG_TIMING
1139 pa_log_debug("Loop");
1140 #endif
1141
1142 /* Read some data and pass it to the sources */
1143 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1144 int work_done;
1145 pa_usec_t sleep_usec = 0;
1146
1147 if (u->use_mmap)
1148 work_done = mmap_read(u, &sleep_usec, revents & POLLIN);
1149 else
1150 work_done = unix_read(u, &sleep_usec, revents & POLLIN);
1151
1152 if (work_done < 0)
1153 goto fail;
1154
1155 /* pa_log_debug("work_done = %i", work_done); */
1156
1157 if (work_done)
1158 update_smoother(u);
1159
1160 if (u->use_tsched) {
1161 pa_usec_t cusec;
1162
1163 /* OK, the capture buffer is now empty, let's
1164 * calculate when to wake up next */
1165
1166 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1167
1168 /* Convert from the sound card time domain to the
1169 * system time domain */
1170 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1171
1172 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1173
1174 /* We don't trust the conversion, so we wake up whatever comes first */
1175 pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
1176 }
1177 } else if (u->use_tsched)
1178
1179 /* OK, we're in an invalid state, let's disable our timers */
1180 pa_rtpoll_set_timer_disabled(u->rtpoll);
1181
1182 /* Hmm, nothing to do. Let's sleep */
1183 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1184 goto fail;
1185
1186 if (ret == 0)
1187 goto finish;
1188
1189 /* Tell ALSA about this and process its response */
1190 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1191 struct pollfd *pollfd;
1192 int err;
1193 unsigned n;
1194
1195 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1196
1197 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1198 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1199 goto fail;
1200 }
1201
1202 if (revents & ~POLLIN) {
1203 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1204 goto fail;
1205
1206 snd_pcm_start(u->pcm_handle);
1207 } else if (revents && u->use_tsched && pa_log_ratelimit())
1208 pa_log_debug("Wakeup from ALSA!");
1209
1210 } else
1211 revents = 0;
1212 }
1213
1214 fail:
1215 /* If this was not a regular exit from the loop we have to continue
1216 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1217 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1218 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1219
1220 finish:
1221 pa_log_debug("Thread shutting down");
1222 }
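/* The IO thread above alternates between reading whatever the device has
 * accumulated (mmap_read()/unix_read()), feeding the smoother, programming
 * the rtpoll timer from the translated sleep time in timer-based mode, and
 * waiting in pa_rtpoll_run(). Poll errors are handled through
 * pa_alsa_recover_from_poll() followed by a restart of the stream. */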
1223
1224 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1225 const char *n;
1226 char *t;
1227
1228 pa_assert(data);
1229 pa_assert(ma);
1230 pa_assert(device_name);
1231
1232 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1233 pa_source_new_data_set_name(data, n);
1234 data->namereg_fail = TRUE;
1235 return;
1236 }
1237
1238 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1239 data->namereg_fail = TRUE;
1240 else {
1241 n = device_id ? device_id : device_name;
1242 data->namereg_fail = FALSE;
1243 }
1244
1245 if (mapping)
1246 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1247 else
1248 t = pa_sprintf_malloc("alsa_input.%s", n);
1249
1250 pa_source_new_data_set_name(data, t);
1251 pa_xfree(t);
1252 }
1253
1254 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1255
1256 if (!mapping && !element)
1257 return;
1258
1259 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1260 pa_log_info("Failed to find a working mixer device.");
1261 return;
1262 }
1263
1264 if (element) {
1265
1266 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1267 goto fail;
1268
1269 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1270 goto fail;
1271
1272 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1273 pa_alsa_path_dump(u->mixer_path);
1274 } else {
1275
1276 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
1277 goto fail;
1278
1279 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1280
1281 pa_log_debug("Probed mixer paths:");
1282 pa_alsa_path_set_dump(u->mixer_path_set);
1283 }
1284
1285 return;
1286
1287 fail:
1288
1289 if (u->mixer_path_set) {
1290 pa_alsa_path_set_free(u->mixer_path_set);
1291 u->mixer_path_set = NULL;
1292 } else if (u->mixer_path) {
1293 pa_alsa_path_free(u->mixer_path);
1294 u->mixer_path = NULL;
1295 }
1296
1297 if (u->mixer_handle) {
1298 snd_mixer_close(u->mixer_handle);
1299 u->mixer_handle = NULL;
1300 }
1301 }
1302
1303 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1304 pa_assert(u);
1305
1306 if (!u->mixer_handle)
1307 return 0;
1308
1309 if (u->source->active_port) {
1310 pa_alsa_port_data *data;
1311
1312 /* We have a list of supported paths, so let's activate the
1313 * one that has been chosen as active */
1314
1315 data = PA_DEVICE_PORT_DATA(u->source->active_port);
1316 u->mixer_path = data->path;
1317
1318 pa_alsa_path_select(data->path, u->mixer_handle);
1319
1320 if (data->setting)
1321 pa_alsa_setting_select(data->setting, u->mixer_handle);
1322
1323 } else {
1324
1325 if (!u->mixer_path && u->mixer_path_set)
1326 u->mixer_path = u->mixer_path_set->paths;
1327
1328 if (u->mixer_path) {
1329 /* No specific port is active, so activate the first path we have */
1330
1331 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1332
1333 if (u->mixer_path->settings)
1334 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1335 } else
1336 return 0;
1337 }
1338
1339 if (!u->mixer_path->has_volume)
1340 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1341 else {
1342
1343 if (u->mixer_path->has_dB) {
1344 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1345
1346 u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1347 u->source->n_volume_steps = PA_VOLUME_NORM+1;
1348
1349 if (u->mixer_path->max_dB > 0.0)
1350 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1351 else
1352 pa_log_info("No particular base volume set, fixing to 0 dB");
1353
1354 } else {
1355 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1356 u->source->base_volume = PA_VOLUME_NORM;
1357 u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1358 }
1359
1360 u->source->get_volume = source_get_volume_cb;
1361 u->source->set_volume = source_set_volume_cb;
1362
1363 u->source->flags |= PA_SOURCE_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SOURCE_DECIBEL_VOLUME : 0);
1364 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1365 }
1366
1367 if (!u->mixer_path->has_mute) {
1368 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1369 } else {
1370 u->source->get_mute = source_get_mute_cb;
1371 u->source->set_mute = source_set_mute_cb;
1372 u->source->flags |= PA_SOURCE_HW_MUTE_CTRL;
1373 pa_log_info("Using hardware mute control.");
1374 }
1375
1376 u->mixer_fdl = pa_alsa_fdlist_new();
1377
1378 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1379 pa_log("Failed to initialize file descriptor monitoring");
1380 return -1;
1381 }
1382
1383 if (u->mixer_path_set)
1384 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1385 else
1386 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1387
1388 return 0;
1389 }
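/* setup_mixer() installs the hardware volume/mute callbacks only if the
 * selected mixer path supports them, sets PA_SOURCE_DECIBEL_VOLUME only when
 * dB information is available, and routes mixer events through
 * pa_alsa_fdlist so that mixer_callback() runs in the main loop. */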
1390
1391 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1392
1393 struct userdata *u = NULL;
1394 const char *dev_id = NULL;
1395 pa_sample_spec ss, requested_ss;
1396 pa_channel_map map;
1397 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1398 snd_pcm_uframes_t period_frames, tsched_frames;
1399 size_t frame_size;
1400 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
1401 pa_source_new_data data;
1402 pa_alsa_profile_set *profile_set = NULL;
1403
1404 pa_assert(m);
1405 pa_assert(ma);
1406
1407 ss = m->core->default_sample_spec;
1408 map = m->core->default_channel_map;
1409 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1410 pa_log("Failed to parse sample specification");
1411 goto fail;
1412 }
1413
1414 requested_ss = ss;
1415 frame_size = pa_frame_size(&ss);
1416
1417 nfrags = m->core->default_n_fragments;
1418 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1419 if (frag_size <= 0)
1420 frag_size = (uint32_t) frame_size;
1421 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1422 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1423
1424 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1425 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1426 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1427 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1428 pa_log("Failed to parse buffer metrics");
1429 goto fail;
1430 }
1431
1432 hwbuf_size = frag_size * nfrags;
1433 period_frames = frag_size/frame_size;
1434 tsched_frames = tsched_size/frame_size;
1435
1436 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1437 pa_log("Failed to parse mmap argument.");
1438 goto fail;
1439 }
1440
1441 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1442 pa_log("Failed to parse timer_scheduling argument.");
1443 goto fail;
1444 }
1445
1446 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1447 pa_log("Failed to parse ignore_dB argument.");
1448 goto fail;
1449 }
1450
1451 if (use_tsched && !pa_rtclock_hrtimer()) {
1452 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1453 use_tsched = FALSE;
1454 }
1455
1456 u = pa_xnew0(struct userdata, 1);
1457 u->core = m->core;
1458 u->module = m;
1459 u->use_mmap = use_mmap;
1460 u->use_tsched = use_tsched;
1461 u->rtpoll = pa_rtpoll_new();
1462 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1463
1464 u->smoother = pa_smoother_new(
1465 DEFAULT_TSCHED_WATERMARK_USEC*2,
1466 DEFAULT_TSCHED_WATERMARK_USEC*2,
1467 TRUE,
1468 TRUE,
1469 5,
1470 pa_rtclock_now(),
1471 FALSE);
1472
1473 dev_id = pa_modargs_get_value(
1474 ma, "device_id",
1475 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1476
1477 if (reserve_init(u, dev_id) < 0)
1478 goto fail;
1479
1480 if (reserve_monitor_init(u, dev_id) < 0)
1481 goto fail;
1482
1483 b = use_mmap;
1484 d = use_tsched;
1485
1486 if (mapping) {
1487
1488 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1489 pa_log("device_id= not set");
1490 goto fail;
1491 }
1492
1493 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1494 dev_id,
1495 &u->device_name,
1496 &ss, &map,
1497 SND_PCM_STREAM_CAPTURE,
1498 &nfrags, &period_frames, tsched_frames,
1499 &b, &d, mapping)))
1500 goto fail;
1501
1502 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1503
1504 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1505 goto fail;
1506
1507 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1508 dev_id,
1509 &u->device_name,
1510 &ss, &map,
1511 SND_PCM_STREAM_CAPTURE,
1512 &nfrags, &period_frames, tsched_frames,
1513 &b, &d, profile_set, &mapping)))
1514 goto fail;
1515
1516 } else {
1517
1518 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1519 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1520 &u->device_name,
1521 &ss, &map,
1522 SND_PCM_STREAM_CAPTURE,
1523 &nfrags, &period_frames, tsched_frames,
1524 &b, &d, FALSE)))
1525 goto fail;
1526 }
1527
1528 pa_assert(u->device_name);
1529 pa_log_info("Successfully opened device %s.", u->device_name);
1530
1531 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1532 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1533 goto fail;
1534 }
1535
1536 if (mapping)
1537 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1538
1539 if (use_mmap && !b) {
1540 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1541 u->use_mmap = use_mmap = FALSE;
1542 }
1543
1544 if (use_tsched && (!b || !d)) {
1545 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1546 u->use_tsched = use_tsched = FALSE;
1547 }
1548
1549 if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
1550 pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
1551 u->use_tsched = use_tsched = FALSE;
1552 }
1553
1554 if (u->use_mmap)
1555 pa_log_info("Successfully enabled mmap() mode.");
1556
1557 if (u->use_tsched)
1558 pa_log_info("Successfully enabled timer-based scheduling mode.");
1559
1560 /* ALSA might tweak the sample spec, so recalculate the frame size */
1561 frame_size = pa_frame_size(&ss);
1562
1563 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1564
1565 pa_source_new_data_init(&data);
1566 data.driver = driver;
1567 data.module = m;
1568 data.card = card;
1569 set_source_name(&data, ma, dev_id, u->device_name, mapping);
1570 pa_source_new_data_set_sample_spec(&data, &ss);
1571 pa_source_new_data_set_channel_map(&data, &map);
1572
1573 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1574 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1575 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1576 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1577 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1578
1579 if (mapping) {
1580 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1581 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1582 }
1583
1584 pa_alsa_init_description(data.proplist);
1585
1586 if (u->control_device)
1587 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1588
1589 if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1590 pa_log("Invalid properties");
1591 pa_source_new_data_done(&data);
1592 goto fail;
1593 }
1594
1595 if (u->mixer_path_set)
1596 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1597
1598 u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1599 pa_source_new_data_done(&data);
1600
1601 if (!u->source) {
1602 pa_log("Failed to create source object");
1603 goto fail;
1604 }
1605
1606 u->source->parent.process_msg = source_process_msg;
1607 u->source->update_requested_latency = source_update_requested_latency_cb;
1608 u->source->set_state = source_set_state_cb;
1609 u->source->set_port = source_set_port_cb;
1610 u->source->userdata = u;
1611
1612 pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1613 pa_source_set_rtpoll(u->source, u->rtpoll);
1614
1615 u->frame_size = frame_size;
1616 u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
1617 u->nfragments = nfrags;
1618 u->hwbuf_size = u->fragment_size * nfrags;
1619 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);
1620 pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1621
1622 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1623 nfrags, (long unsigned) u->fragment_size,
1624 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1625
1626 if (u->use_tsched) {
1627 u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->source->sample_spec);
1628
1629 fix_min_sleep_wakeup(u);
1630 fix_tsched_watermark(u);
1631
1632 pa_source_set_latency_range(u->source,
1633 0,
1634 pa_bytes_to_usec(u->hwbuf_size, &ss));
1635
1636 pa_log_info("Time scheduling watermark is %0.2fms",
1637 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1638 } else
1639 pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
1640
1641 reserve_update(u);
1642
1643 if (update_sw_params(u) < 0)
1644 goto fail;
1645
1646 if (setup_mixer(u, ignore_dB) < 0)
1647 goto fail;
1648
1649 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1650
1651 if (!(u->thread = pa_thread_new(thread_func, u))) {
1652 pa_log("Failed to create thread.");
1653 goto fail;
1654 }
1655 /* Get initial mixer settings */
1656 if (data.volume_is_set) {
1657 if (u->source->set_volume)
1658 u->source->set_volume(u->source);
1659 } else {
1660 if (u->source->get_volume)
1661 u->source->get_volume(u->source);
1662 }
1663
1664 if (data.muted_is_set) {
1665 if (u->source->set_mute)
1666 u->source->set_mute(u->source);
1667 } else {
1668 if (u->source->get_mute)
1669 u->source->get_mute(u->source);
1670 }
1671
1672 pa_source_put(u->source);
1673
1674 if (profile_set)
1675 pa_alsa_profile_set_free(profile_set);
1676
1677 return u->source;
1678
1679 fail:
1680
1681 if (u)
1682 userdata_free(u);
1683
1684 if (profile_set)
1685 pa_alsa_profile_set_free(profile_set);
1686
1687 return NULL;
1688 }
1689
1690 static void userdata_free(struct userdata *u) {
1691 pa_assert(u);
1692
1693 if (u->source)
1694 pa_source_unlink(u->source);
1695
1696 if (u->thread) {
1697 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1698 pa_thread_free(u->thread);
1699 }
1700
1701 pa_thread_mq_done(&u->thread_mq);
1702
1703 if (u->source)
1704 pa_source_unref(u->source);
1705
1706 if (u->alsa_rtpoll_item)
1707 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1708
1709 if (u->rtpoll)
1710 pa_rtpoll_free(u->rtpoll);
1711
1712 if (u->pcm_handle) {
1713 snd_pcm_drop(u->pcm_handle);
1714 snd_pcm_close(u->pcm_handle);
1715 }
1716
1717 if (u->mixer_fdl)
1718 pa_alsa_fdlist_free(u->mixer_fdl);
1719
1720 if (u->mixer_path_set)
1721 pa_alsa_path_set_free(u->mixer_path_set);
1722 else if (u->mixer_path)
1723 pa_alsa_path_free(u->mixer_path);
1724
1725 if (u->mixer_handle)
1726 snd_mixer_close(u->mixer_handle);
1727
1728 if (u->smoother)
1729 pa_smoother_free(u->smoother);
1730
1731 reserve_done(u);
1732 monitor_done(u);
1733
1734 pa_xfree(u->device_name);
1735 pa_xfree(u->control_device);
1736 pa_xfree(u);
1737 }
1738
1739 void pa_alsa_source_free(pa_source *s) {
1740 struct userdata *u;
1741
1742 pa_source_assert_ref(s);
1743 pa_assert_se(u = s->userdata);
1744
1745 userdata_free(u);
1746 }