[pulseaudio] / src / modules / alsa / alsa-source.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/i18n.h>
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core-error.h>
38 #include <pulsecore/core.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/core-error.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/time-smoother.h>
53
54 #include <modules/reserve-wrap.h>
55
56 #include "alsa-util.h"
57 #include "alsa-source.h"
58
59 /* #define DEBUG_TIMING */
60
61 #define DEFAULT_DEVICE "default"
62
63 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
64 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
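/* E.g. at 44.1 kHz S16LE stereo (4 bytes per frame) these defaults correspond
 * to a 352800 byte buffer and a 3528 byte watermark. */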
65
66 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
67 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
68 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
69 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
70 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
71 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
72
73 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
74 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
75
76 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s */
77 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s */
78
79 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
80 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
81
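/* Tolerance (1% of PA_VOLUME_NORM) within which a residual software volume
 * correction is considered close enough to unity to be skipped, see
 * source_set_volume_cb(). */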
82 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
83
84 struct userdata {
85 pa_core *core;
86 pa_module *module;
87 pa_source *source;
88
89 pa_thread *thread;
90 pa_thread_mq thread_mq;
91 pa_rtpoll *rtpoll;
92
93 snd_pcm_t *pcm_handle;
94
95 pa_alsa_fdlist *mixer_fdl;
96 snd_mixer_t *mixer_handle;
97 pa_alsa_path_set *mixer_path_set;
98 pa_alsa_path *mixer_path;
99
100 pa_cvolume hardware_volume;
101
102 size_t
103 frame_size,
104 fragment_size,
105 hwbuf_size,
106 tsched_watermark,
107 hwbuf_unused,
108 min_sleep,
109 min_wakeup,
110 watermark_inc_step,
111 watermark_dec_step,
112 watermark_inc_threshold,
113 watermark_dec_threshold;
114
115 pa_usec_t watermark_dec_not_before;
116
117 char *device_name;
118 char *control_device;
119
120 pa_bool_t use_mmap:1, use_tsched:1;
121
122 pa_bool_t first;
123
124 pa_rtpoll_item *alsa_rtpoll_item;
125
126 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
127
128 pa_smoother *smoother;
129 uint64_t read_count;
130 pa_usec_t smoother_interval;
131 pa_usec_t last_smoother_update;
132
133 pa_reserve_wrapper *reserve;
134 pa_hook_slot *reserve_slot;
135 pa_reserve_monitor_wrapper *monitor;
136 pa_hook_slot *monitor_slot;
137 };
138
139 static void userdata_free(struct userdata *u);
140
141 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
142 pa_assert(r);
143 pa_assert(u);
144
145 if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
146 return PA_HOOK_CANCEL;
147
148 return PA_HOOK_OK;
149 }
150
151 static void reserve_done(struct userdata *u) {
152 pa_assert(u);
153
154 if (u->reserve_slot) {
155 pa_hook_slot_free(u->reserve_slot);
156 u->reserve_slot = NULL;
157 }
158
159 if (u->reserve) {
160 pa_reserve_wrapper_unref(u->reserve);
161 u->reserve = NULL;
162 }
163 }
164
165 static void reserve_update(struct userdata *u) {
166 const char *description;
167 pa_assert(u);
168
169 if (!u->source || !u->reserve)
170 return;
171
172 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
173 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
174 }
175
176 static int reserve_init(struct userdata *u, const char *dname) {
177 char *rname;
178
179 pa_assert(u);
180 pa_assert(dname);
181
182 if (u->reserve)
183 return 0;
184
185 if (pa_in_system_mode())
186 return 0;
187
188 /* We are resuming, try to lock the device */
189 if (!(rname = pa_alsa_get_reserve_name(dname)))
190 return 0;
191
192 u->reserve = pa_reserve_wrapper_get(u->core, rname);
193 pa_xfree(rname);
194
195 if (!(u->reserve))
196 return -1;
197
198 reserve_update(u);
199
200 pa_assert(!u->reserve_slot);
201 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
202
203 return 0;
204 }
205
206 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
207 pa_bool_t b;
208
209 pa_assert(w);
210 pa_assert(u);
211
212 b = PA_PTR_TO_UINT(busy) && !u->reserve;
213
214 pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
215 return PA_HOOK_OK;
216 }
217
218 static void monitor_done(struct userdata *u) {
219 pa_assert(u);
220
221 if (u->monitor_slot) {
222 pa_hook_slot_free(u->monitor_slot);
223 u->monitor_slot = NULL;
224 }
225
226 if (u->monitor) {
227 pa_reserve_monitor_wrapper_unref(u->monitor);
228 u->monitor = NULL;
229 }
230 }
231
232 static int reserve_monitor_init(struct userdata *u, const char *dname) {
233 char *rname;
234
235 pa_assert(u);
236 pa_assert(dname);
237
238 if (pa_in_system_mode())
239 return 0;
240
241 /* We are resuming, try to lock the device */
242 if (!(rname = pa_alsa_get_reserve_name(dname)))
243 return 0;
244
245 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
246 pa_xfree(rname);
247
248 if (!(u->monitor))
249 return -1;
250
251 pa_assert(!u->monitor_slot);
252 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
253
254 return 0;
255 }
256
257 static void fix_min_sleep_wakeup(struct userdata *u) {
258 size_t max_use, max_use_2;
259 pa_assert(u);
260 pa_assert(u->use_tsched);
261
262 max_use = u->hwbuf_size - u->hwbuf_unused;
263 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
264
265 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
266 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
267
268 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
269 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
270 }
271
272 static void fix_tsched_watermark(struct userdata *u) {
273 size_t max_use;
274 pa_assert(u);
275 pa_assert(u->use_tsched);
276
277 max_use = u->hwbuf_size - u->hwbuf_unused;
278
279 if (u->tsched_watermark > max_use - u->min_sleep)
280 u->tsched_watermark = max_use - u->min_sleep;
281
282 if (u->tsched_watermark < u->min_wakeup)
283 u->tsched_watermark = u->min_wakeup;
284 }
285
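/* Adaptive watermark handling: increase_watermark() grows the wakeup watermark
 * (and, once that is maxed out, the minimal latency) after an overrun or when
 * too little headroom is left; decrease_watermark() shrinks it again, but at
 * most once per TSCHED_WATERMARK_VERIFY_AFTER_USEC of trouble-free operation. */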
286 static void increase_watermark(struct userdata *u) {
287 size_t old_watermark;
288 pa_usec_t old_min_latency, new_min_latency;
289
290 pa_assert(u);
291 pa_assert(u->use_tsched);
292
293 /* First, just try to increase the watermark */
294 old_watermark = u->tsched_watermark;
295 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
296 fix_tsched_watermark(u);
297
298 if (old_watermark != u->tsched_watermark) {
299 pa_log_info("Increasing wakeup watermark to %0.2f ms",
300 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
301 return;
302 }
303
304 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
305 old_min_latency = u->source->thread_info.min_latency;
306 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
307 new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
308
309 if (old_min_latency != new_min_latency) {
310 pa_log_info("Increasing minimal latency to %0.2f ms",
311 (double) new_min_latency / PA_USEC_PER_MSEC);
312
313 pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
314 }
315
316     /* When we reach this we're officially fucked! */
317 }
318
319 static void decrease_watermark(struct userdata *u) {
320 size_t old_watermark;
321 pa_usec_t now;
322
323 pa_assert(u);
324 pa_assert(u->use_tsched);
325
326 now = pa_rtclock_now();
327
328 if (u->watermark_dec_not_before <= 0)
329 goto restart;
330
331 if (u->watermark_dec_not_before > now)
332 return;
333
334 old_watermark = u->tsched_watermark;
335
336 if (u->tsched_watermark < u->watermark_dec_step)
337 u->tsched_watermark = u->tsched_watermark / 2;
338 else
339 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
340
341 fix_tsched_watermark(u);
342
343 if (old_watermark != u->tsched_watermark)
344 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
345 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
346
347     /* We don't change the latency range */
348
349 restart:
350 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
351 }
352
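/* Split the target buffer fill time (the requested latency, or the whole
 * hardware buffer if nothing was requested) into a sleep period and a
 * processing margin equal to the wakeup watermark; the total is returned. */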
353 static pa_usec_t hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
354 pa_usec_t wm, usec;
355
356 pa_assert(sleep_usec);
357 pa_assert(process_usec);
358
359 pa_assert(u);
360 pa_assert(u->use_tsched);
361
362 usec = pa_source_get_requested_latency_within_thread(u->source);
363
364 if (usec == (pa_usec_t) -1)
365 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
366
367 wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
368
369 if (wm > usec)
370 wm = usec/2;
371
372 *sleep_usec = usec - wm;
373 *process_usec = wm;
374
375 #ifdef DEBUG_TIMING
376 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
377 (unsigned long) (usec / PA_USEC_PER_MSEC),
378 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
379 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
380 #endif
381
382 return usec;
383 }
384
385 static int try_recover(struct userdata *u, const char *call, int err) {
386 pa_assert(u);
387 pa_assert(call);
388 pa_assert(err < 0);
389
390 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
391
392 pa_assert(err != -EAGAIN);
393
394 if (err == -EPIPE)
395 pa_log_debug("%s: Buffer overrun!", call);
396
397 if (err == -ESTRPIPE)
398 pa_log_debug("%s: System suspended!", call);
399
400 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
401 pa_log("%s: %s", call, pa_alsa_strerror(err));
402 return -1;
403 }
404
405 u->first = TRUE;
406 return 0;
407 }
408
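/* Given the current fill level n_bytes, return how much room is left in the
 * hardware buffer before an overrun would occur, and in tsched mode adapt the
 * wakeup watermark: grow it on an overrun or when the headroom drops below
 * watermark_inc_threshold, shrink it (only on timer wakeups) when more than
 * watermark_dec_threshold is left. */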
409 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
410 size_t left_to_record;
411 size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
412 pa_bool_t overrun = FALSE;
413
414 /* We use <= instead of < for this check here because an overrun
415      * only happens once the last sample has been processed, not as soon as
416      * it is removed from the buffer. This is particularly important
417 * when block transfer is used. */
418
419 if (n_bytes <= rec_space)
420 left_to_record = rec_space - n_bytes;
421 else {
422
423 /* We got a dropout. What a mess! */
424 left_to_record = 0;
425 overrun = TRUE;
426
427 #ifdef DEBUG_TIMING
428 PA_DEBUG_TRAP;
429 #endif
430
431 if (pa_log_ratelimit())
432 pa_log_info("Overrun!");
433 }
434
435 #ifdef DEBUG_TIMING
436 pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
437 #endif
438
439 if (u->use_tsched) {
440 pa_bool_t reset_not_before = TRUE;
441
442 if (overrun || left_to_record < u->watermark_inc_threshold)
443 increase_watermark(u);
444 else if (left_to_record > u->watermark_dec_threshold) {
445 reset_not_before = FALSE;
446
447             /* We decrease the watermark only if we have actually been
448 * woken up by a timeout. If something else woke us up
449 * it's too easy to fulfill the deadlines... */
450
451 if (on_timeout)
452 decrease_watermark(u);
453 }
454
455 if (reset_not_before)
456 u->watermark_dec_not_before = 0;
457 }
458
459 return left_to_record;
460 }
461
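/* mmap capture path: map the ALSA ring buffer with pa_alsa_safe_mmap_begin(),
 * wrap the mapped area into a read-only fixed memblock so the data does not
 * need to be copied, post it to the source and commit the frames back with
 * snd_pcm_mmap_commit(). */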
462 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
463 pa_bool_t work_done = FALSE;
464 pa_usec_t max_sleep_usec = 0, process_usec = 0;
465 size_t left_to_record;
466 unsigned j = 0;
467
468 pa_assert(u);
469 pa_source_assert_ref(u->source);
470
471 if (u->use_tsched)
472 hw_sleep_time(u, &max_sleep_usec, &process_usec);
473
474 for (;;) {
475 snd_pcm_sframes_t n;
476 size_t n_bytes;
477 int r;
478 pa_bool_t after_avail = TRUE;
479
480 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
481
482 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
483 continue;
484
485 return r;
486 }
487
488 n_bytes = (size_t) n * u->frame_size;
489
490 #ifdef DEBUG_TIMING
491 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
492 #endif
493
494 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
495 on_timeout = FALSE;
496
497 if (u->use_tsched)
498 if (!polled &&
499 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
500 #ifdef DEBUG_TIMING
501 pa_log_debug("Not reading, because too early.");
502 #endif
503 break;
504 }
505
506 if (PA_UNLIKELY(n_bytes <= 0)) {
507
508 if (polled)
509 PA_ONCE_BEGIN {
510 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
511 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
512 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
513 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
514 pa_strnull(dn));
515 pa_xfree(dn);
516 } PA_ONCE_END;
517
518 #ifdef DEBUG_TIMING
519 pa_log_debug("Not reading, because not necessary.");
520 #endif
521 break;
522 }
523
524 if (++j > 10) {
525 #ifdef DEBUG_TIMING
526 pa_log_debug("Not filling up, because already too many iterations.");
527 #endif
528
529 break;
530 }
531
532 polled = FALSE;
533
534 #ifdef DEBUG_TIMING
535 pa_log_debug("Reading");
536 #endif
537
538 for (;;) {
539 int err;
540 const snd_pcm_channel_area_t *areas;
541 snd_pcm_uframes_t offset, frames;
542 pa_memchunk chunk;
543 void *p;
544 snd_pcm_sframes_t sframes;
545
546 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
547
548 /* pa_log_debug("%lu frames to read", (unsigned long) frames); */
549
550 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
551
552 if (!after_avail && err == -EAGAIN)
553 break;
554
555 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
556 continue;
557
558 return r;
559 }
560
561 /* Make sure that if these memblocks need to be copied they will fit into one slot */
562 if (frames > pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size)
563 frames = pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size;
564
565 if (!after_avail && frames == 0)
566 break;
567
568 pa_assert(frames > 0);
569 after_avail = FALSE;
570
571 /* Check these are multiples of 8 bit */
572 pa_assert((areas[0].first & 7) == 0);
573             pa_assert((areas[0].step & 7) == 0);
574
575 /* We assume a single interleaved memory buffer */
576 pa_assert((areas[0].first >> 3) == 0);
577 pa_assert((areas[0].step >> 3) == u->frame_size);
578
579 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
580
581 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
582 chunk.length = pa_memblock_get_length(chunk.memblock);
583 chunk.index = 0;
584
585 pa_source_post(u->source, &chunk);
586 pa_memblock_unref_fixed(chunk.memblock);
587
588 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
589
590 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
591 continue;
592
593 return r;
594 }
595
596 work_done = TRUE;
597
598 u->read_count += frames * u->frame_size;
599
600 #ifdef DEBUG_TIMING
601 pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
602 #endif
603
604 if ((size_t) frames * u->frame_size >= n_bytes)
605 break;
606
607 n_bytes -= (size_t) frames * u->frame_size;
608 }
609 }
610
611 if (u->use_tsched) {
612 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
613
614 if (*sleep_usec > process_usec)
615 *sleep_usec -= process_usec;
616 else
617 *sleep_usec = 0;
618 }
619
620 return work_done ? 1 : 0;
621 }
622
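/* Fallback capture path for devices without mmap support: allocate a fresh
 * memblock and fill it with snd_pcm_readi(), then post it to the source. */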
623 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
624 int work_done = FALSE;
625 pa_usec_t max_sleep_usec = 0, process_usec = 0;
626 size_t left_to_record;
627 unsigned j = 0;
628
629 pa_assert(u);
630 pa_source_assert_ref(u->source);
631
632 if (u->use_tsched)
633 hw_sleep_time(u, &max_sleep_usec, &process_usec);
634
635 for (;;) {
636 snd_pcm_sframes_t n;
637 size_t n_bytes;
638 int r;
639 pa_bool_t after_avail = TRUE;
640
641 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
642
643 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
644 continue;
645
646 return r;
647 }
648
649 n_bytes = (size_t) n * u->frame_size;
650 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
651 on_timeout = FALSE;
652
653 if (u->use_tsched)
654 if (!polled &&
655 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
656 break;
657
658 if (PA_UNLIKELY(n_bytes <= 0)) {
659
660 if (polled)
661 PA_ONCE_BEGIN {
662 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
663 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
664 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
665 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
666 pa_strnull(dn));
667 pa_xfree(dn);
668 } PA_ONCE_END;
669
670 break;
671 }
672
673 if (++j > 10) {
674 #ifdef DEBUG_TIMING
675 pa_log_debug("Not filling up, because already too many iterations.");
676 #endif
677
678 break;
679 }
680
681 polled = FALSE;
682
683 for (;;) {
684 void *p;
685 snd_pcm_sframes_t frames;
686 pa_memchunk chunk;
687
688 chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
689
690 frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
691
692 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
693 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
694
695 /* pa_log_debug("%lu frames to read", (unsigned long) n); */
696
697 p = pa_memblock_acquire(chunk.memblock);
698 frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
699 pa_memblock_release(chunk.memblock);
700
701 if (PA_UNLIKELY(frames < 0)) {
702 pa_memblock_unref(chunk.memblock);
703
704 if (!after_avail && (int) frames == -EAGAIN)
705 break;
706
707 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
708 continue;
709
710 return r;
711 }
712
713 if (!after_avail && frames == 0) {
714 pa_memblock_unref(chunk.memblock);
715 break;
716 }
717
718 pa_assert(frames > 0);
719 after_avail = FALSE;
720
721 chunk.index = 0;
722 chunk.length = (size_t) frames * u->frame_size;
723
724 pa_source_post(u->source, &chunk);
725 pa_memblock_unref(chunk.memblock);
726
727 work_done = TRUE;
728
729 u->read_count += frames * u->frame_size;
730
731 /* pa_log_debug("read %lu frames", (unsigned long) frames); */
732
733 if ((size_t) frames * u->frame_size >= n_bytes)
734 break;
735
736 n_bytes -= (size_t) frames * u->frame_size;
737 }
738 }
739
740 if (u->use_tsched) {
741 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
742
743 if (*sleep_usec > process_usec)
744 *sleep_usec -= process_usec;
745 else
746 *sleep_usec = 0;
747 }
748
749 return work_done ? 1 : 0;
750 }
751
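/* Feed the smoother with pairs of (system time, sound card time), where the
 * sound card time is derived from the bytes read so far plus the current
 * driver delay. The update interval doubles after each update, up to
 * SMOOTHER_MAX_INTERVAL. */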
752 static void update_smoother(struct userdata *u) {
753 snd_pcm_sframes_t delay = 0;
754 uint64_t position;
755 int err;
756 pa_usec_t now1 = 0, now2;
757 snd_pcm_status_t *status;
758
759 snd_pcm_status_alloca(&status);
760
761 pa_assert(u);
762 pa_assert(u->pcm_handle);
763
764 /* Let's update the time smoother */
765
766 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
767 pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
768 return;
769 }
770
771 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
772 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
773 else {
774 snd_htimestamp_t htstamp = { 0, 0 };
775 snd_pcm_status_get_htstamp(status, &htstamp);
776 now1 = pa_timespec_load(&htstamp);
777 }
778
779 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
780 if (now1 <= 0)
781 now1 = pa_rtclock_now();
782
783 /* check if the time since the last update is bigger than the interval */
784 if (u->last_smoother_update > 0)
785 if (u->last_smoother_update + u->smoother_interval > now1)
786 return;
787
788 position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
789 now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
790
791 pa_smoother_put(u->smoother, now1, now2);
792
793 u->last_smoother_update = now1;
794 /* exponentially increase the update interval up to the MAX limit */
795 u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
796 }
797
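/* The source latency is the audio the smoother estimates the device has
 * already captured but we have not yet read, i.e. the smoothed sound card
 * time minus the time equivalent of read_count. */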
798 static pa_usec_t source_get_latency(struct userdata *u) {
799 int64_t delay;
800 pa_usec_t now1, now2;
801
802 pa_assert(u);
803
804 now1 = pa_rtclock_now();
805 now2 = pa_smoother_get(u->smoother, now1);
806
807 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
808
809 return delay >= 0 ? (pa_usec_t) delay : 0;
810 }
811
812 static int build_pollfd(struct userdata *u) {
813 pa_assert(u);
814 pa_assert(u->pcm_handle);
815
816 if (u->alsa_rtpoll_item)
817 pa_rtpoll_item_free(u->alsa_rtpoll_item);
818
819 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
820 return -1;
821
822 return 0;
823 }
824
825 static int suspend(struct userdata *u) {
826 pa_assert(u);
827 pa_assert(u->pcm_handle);
828
829 pa_smoother_pause(u->smoother, pa_rtclock_now());
830
831 /* Let's suspend */
832 snd_pcm_close(u->pcm_handle);
833 u->pcm_handle = NULL;
834
835 if (u->alsa_rtpoll_item) {
836 pa_rtpoll_item_free(u->alsa_rtpoll_item);
837 u->alsa_rtpoll_item = NULL;
838 }
839
840 pa_log_info("Device suspended...");
841
842 return 0;
843 }
844
845 static int update_sw_params(struct userdata *u) {
846 snd_pcm_uframes_t avail_min;
847 int err;
848
849 pa_assert(u);
850
851     /* Use the full buffer if no one asked us for anything specific */
852 u->hwbuf_unused = 0;
853
854 if (u->use_tsched) {
855 pa_usec_t latency;
856
857 if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
858 size_t b;
859
860 pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
861
862 b = pa_usec_to_bytes(latency, &u->source->sample_spec);
863
864 /* We need at least one sample in our buffer */
865
866 if (PA_UNLIKELY(b < u->frame_size))
867 b = u->frame_size;
868
869 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
870 }
871
872 fix_min_sleep_wakeup(u);
873 fix_tsched_watermark(u);
874 }
875
876 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
877
878 avail_min = 1;
879
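/* With timer-based scheduling avail_min is enlarged by the computed sleep
 * time, so that ALSA's own poll() wakeups do not fire before our timer would;
 * otherwise we keep it at a single frame. */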
880 if (u->use_tsched) {
881 pa_usec_t sleep_usec, process_usec;
882
883 hw_sleep_time(u, &sleep_usec, &process_usec);
884 avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
885 }
886
887 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
888
889 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
890 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
891 return err;
892 }
893
894 return 0;
895 }
896
897 static int unsuspend(struct userdata *u) {
898 pa_sample_spec ss;
899 int err;
900 pa_bool_t b, d;
901 snd_pcm_uframes_t period_size, buffer_size;
902
903 pa_assert(u);
904 pa_assert(!u->pcm_handle);
905
906 pa_log_info("Trying resume...");
907
908 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
909 SND_PCM_NONBLOCK|
910 SND_PCM_NO_AUTO_RESAMPLE|
911 SND_PCM_NO_AUTO_CHANNELS|
912 SND_PCM_NO_AUTO_FORMAT)) < 0) {
913 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
914 goto fail;
915 }
916
917 ss = u->source->sample_spec;
918 period_size = u->fragment_size / u->frame_size;
919 buffer_size = u->hwbuf_size / u->frame_size;
920 b = u->use_mmap;
921 d = u->use_tsched;
922
923 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
924 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
925 goto fail;
926 }
927
928 if (b != u->use_mmap || d != u->use_tsched) {
929 pa_log_warn("Resume failed, couldn't get original access mode.");
930 goto fail;
931 }
932
933 if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
934 pa_log_warn("Resume failed, couldn't restore original sample settings.");
935 goto fail;
936 }
937
938 if (period_size*u->frame_size != u->fragment_size ||
939 buffer_size*u->frame_size != u->hwbuf_size) {
940 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
941 (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
942 (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
943 goto fail;
944 }
945
946 if (update_sw_params(u) < 0)
947 goto fail;
948
949 if (build_pollfd(u) < 0)
950 goto fail;
951
952 /* FIXME: We need to reload the volume somehow */
953
954 u->read_count = 0;
955 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
956 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
957 u->last_smoother_update = 0;
958
959 u->first = TRUE;
960
961 pa_log_info("Resumed successfully...");
962
963 return 0;
964
965 fail:
966 if (u->pcm_handle) {
967 snd_pcm_close(u->pcm_handle);
968 u->pcm_handle = NULL;
969 }
970
971 return -PA_ERR_IO;
972 }
973
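/* IO-thread message handler: answers PA_SOURCE_MESSAGE_GET_LATENCY from the
 * smoother and reacts to state changes by suspending, unsuspending or setting
 * up the poll fds for the PCM device. */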
974 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
975 struct userdata *u = PA_SOURCE(o)->userdata;
976
977 switch (code) {
978
979 case PA_SOURCE_MESSAGE_GET_LATENCY: {
980 pa_usec_t r = 0;
981
982 if (u->pcm_handle)
983 r = source_get_latency(u);
984
985 *((pa_usec_t*) data) = r;
986
987 return 0;
988 }
989
990 case PA_SOURCE_MESSAGE_SET_STATE:
991
992 switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
993
994 case PA_SOURCE_SUSPENDED: {
995 int r;
996 pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
997
998 if ((r = suspend(u)) < 0)
999 return r;
1000
1001 break;
1002 }
1003
1004 case PA_SOURCE_IDLE:
1005 case PA_SOURCE_RUNNING: {
1006 int r;
1007
1008 if (u->source->thread_info.state == PA_SOURCE_INIT) {
1009 if (build_pollfd(u) < 0)
1010 return -PA_ERR_IO;
1011 }
1012
1013 if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1014 if ((r = unsuspend(u)) < 0)
1015 return r;
1016 }
1017
1018 break;
1019 }
1020
1021 case PA_SOURCE_UNLINKED:
1022 case PA_SOURCE_INIT:
1023 case PA_SOURCE_INVALID_STATE:
1024 ;
1025 }
1026
1027 break;
1028 }
1029
1030 return pa_source_process_msg(o, code, data, offset, chunk);
1031 }
1032
1033 /* Called from main context */
1034 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1035 pa_source_state_t old_state;
1036 struct userdata *u;
1037
1038 pa_source_assert_ref(s);
1039 pa_assert_se(u = s->userdata);
1040
1041 old_state = pa_source_get_state(u->source);
1042
1043     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1044         reserve_done(u);
1045     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1046 if (reserve_init(u, u->device_name) < 0)
1047 return -PA_ERR_BUSY;
1048
1049 return 0;
1050 }
1051
1052 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1053 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1054
1055 pa_assert(u);
1056 pa_assert(u->mixer_handle);
1057
1058 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1059 return 0;
1060
1061 if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1062 return 0;
1063
1064 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1065 pa_source_get_volume(u->source, TRUE);
1066 pa_source_get_mute(u->source, TRUE);
1067 }
1068
1069 return 0;
1070 }
1071
1072 static void source_get_volume_cb(pa_source *s) {
1073 struct userdata *u = s->userdata;
1074 pa_cvolume r;
1075 char t[PA_CVOLUME_SNPRINT_MAX];
1076
1077 pa_assert(u);
1078 pa_assert(u->mixer_path);
1079 pa_assert(u->mixer_handle);
1080
1081 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1082 return;
1083
1084 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1085 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1086
1087 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1088
1089 if (pa_cvolume_equal(&u->hardware_volume, &r))
1090 return;
1091
1092 s->volume = u->hardware_volume = r;
1093
1094 /* Hmm, so the hardware volume changed, let's reset our software volume */
1095 if (u->mixer_path->has_dB)
1096 pa_source_set_soft_volume(s, NULL);
1097 }
1098
1099 static void source_set_volume_cb(pa_source *s) {
1100 struct userdata *u = s->userdata;
1101 pa_cvolume r;
1102 char t[PA_CVOLUME_SNPRINT_MAX];
1103
1104 pa_assert(u);
1105 pa_assert(u->mixer_path);
1106 pa_assert(u->mixer_handle);
1107
1108 /* Shift up by the base volume */
1109 pa_sw_cvolume_divide_scalar(&r, &s->volume, s->base_volume);
1110
1111 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, TRUE) < 0)
1112 return;
1113
1114 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1115 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1116
1117 u->hardware_volume = r;
1118
1119 if (u->mixer_path->has_dB) {
1120 pa_cvolume new_soft_volume;
1121 pa_bool_t accurate_enough;
1122
1123 /* Match exactly what the user requested by software */
1124 pa_sw_cvolume_divide(&new_soft_volume, &s->volume, &u->hardware_volume);
1125
1126 /* If the adjustment to do in software is only minimal we
1127 * can skip it. That saves us CPU at the expense of a bit of
1128 * accuracy */
1129 accurate_enough =
1130 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1131 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1132
1133 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->volume));
1134 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
1135 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
1136 pa_yes_no(accurate_enough));
1137
1138 if (!accurate_enough)
1139 s->soft_volume = new_soft_volume;
1140
1141 } else {
1142 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1143
1144 /* We can't match exactly what the user requested, hence let's
1145 * at least tell the user about it */
1146
1147 s->volume = r;
1148 }
1149 }
1150
1151 static void source_get_mute_cb(pa_source *s) {
1152 struct userdata *u = s->userdata;
1153 pa_bool_t b;
1154
1155 pa_assert(u);
1156 pa_assert(u->mixer_path);
1157 pa_assert(u->mixer_handle);
1158
1159 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1160 return;
1161
1162 s->muted = b;
1163 }
1164
1165 static void source_set_mute_cb(pa_source *s) {
1166 struct userdata *u = s->userdata;
1167
1168 pa_assert(u);
1169 pa_assert(u->mixer_path);
1170 pa_assert(u->mixer_handle);
1171
1172 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1173 }
1174
1175 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1176 struct userdata *u = s->userdata;
1177 pa_alsa_port_data *data;
1178
1179 pa_assert(u);
1180 pa_assert(p);
1181 pa_assert(u->mixer_handle);
1182
1183 data = PA_DEVICE_PORT_DATA(p);
1184
1185 pa_assert_se(u->mixer_path = data->path);
1186 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1187
1188 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1189 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1190 s->n_volume_steps = PA_VOLUME_NORM+1;
1191
1192 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1193 } else {
1194 s->base_volume = PA_VOLUME_NORM;
1195 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1196 }
1197
1198 if (data->setting)
1199 pa_alsa_setting_select(data->setting, u->mixer_handle);
1200
1201 if (s->set_mute)
1202 s->set_mute(s);
1203 if (s->set_volume)
1204 s->set_volume(s);
1205
1206 return 0;
1207 }
1208
1209 static void source_update_requested_latency_cb(pa_source *s) {
1210 struct userdata *u = s->userdata;
1211 pa_assert(u);
1212 pa_assert(u->use_tsched);
1213
1214 if (!u->pcm_handle)
1215 return;
1216
1217 update_sw_params(u);
1218 }
1219
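/* IO thread main loop: start the PCM on the first iteration, read whatever is
 * available (mmap_read()/unix_read()), update the smoother, program the rtpoll
 * timer in tsched mode and sleep in pa_rtpoll_run(); afterwards the ALSA poll
 * revents are checked and errors are recovered from. */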
1220 static void thread_func(void *userdata) {
1221 struct userdata *u = userdata;
1222 unsigned short revents = 0;
1223
1224 pa_assert(u);
1225
1226 pa_log_debug("Thread starting up");
1227
1228 if (u->core->realtime_scheduling)
1229 pa_make_realtime(u->core->realtime_priority);
1230
1231 pa_thread_mq_install(&u->thread_mq);
1232
1233 for (;;) {
1234 int ret;
1235
1236 #ifdef DEBUG_TIMING
1237 pa_log_debug("Loop");
1238 #endif
1239
1240         /* Read some data and pass it to the source outputs */
1241 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1242 int work_done;
1243 pa_usec_t sleep_usec = 0;
1244 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1245
1246 if (u->first) {
1247 pa_log_info("Starting capture.");
1248 snd_pcm_start(u->pcm_handle);
1249
1250 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1251
1252 u->first = FALSE;
1253 }
1254
1255 if (u->use_mmap)
1256 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1257 else
1258 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1259
1260 if (work_done < 0)
1261 goto fail;
1262
1263 /* pa_log_debug("work_done = %i", work_done); */
1264
1265 if (work_done)
1266 update_smoother(u);
1267
1268 if (u->use_tsched) {
1269 pa_usec_t cusec;
1270
1271 /* OK, the capture buffer is now empty, let's
1272 * calculate when to wake up next */
1273
1274 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1275
1276 /* Convert from the sound card time domain to the
1277 * system time domain */
1278 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1279
1280 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1281
1282 /* We don't trust the conversion, so we wake up whatever comes first */
1283 pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
1284 }
1285 } else if (u->use_tsched)
1286
1287 /* OK, we're in an invalid state, let's disable our timers */
1288 pa_rtpoll_set_timer_disabled(u->rtpoll);
1289
1290 /* Hmm, nothing to do. Let's sleep */
1291 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1292 goto fail;
1293
1294 if (ret == 0)
1295 goto finish;
1296
1297 /* Tell ALSA about this and process its response */
1298 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1299 struct pollfd *pollfd;
1300 int err;
1301 unsigned n;
1302
1303 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1304
1305 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1306 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1307 goto fail;
1308 }
1309
1310 if (revents & ~POLLIN) {
1311 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1312 goto fail;
1313
1314 u->first = TRUE;
1315 } else if (revents && u->use_tsched && pa_log_ratelimit())
1316 pa_log_debug("Wakeup from ALSA!");
1317
1318 } else
1319 revents = 0;
1320 }
1321
1322 fail:
1323     /* If this was not a regular exit from the loop we have to continue
1324      * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1325 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1326 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1327
1328 finish:
1329 pa_log_debug("Thread shutting down");
1330 }
1331
1332 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1333 const char *n;
1334 char *t;
1335
1336 pa_assert(data);
1337 pa_assert(ma);
1338 pa_assert(device_name);
1339
1340 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1341 pa_source_new_data_set_name(data, n);
1342 data->namereg_fail = TRUE;
1343 return;
1344 }
1345
1346 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1347 data->namereg_fail = TRUE;
1348 else {
1349 n = device_id ? device_id : device_name;
1350 data->namereg_fail = FALSE;
1351 }
1352
1353 if (mapping)
1354 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1355 else
1356 t = pa_sprintf_malloc("alsa_input.%s", n);
1357
1358 pa_source_new_data_set_name(data, t);
1359 pa_xfree(t);
1360 }
1361
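/* Open the mixer device that belongs to the PCM and probe either the
 * explicitly requested element (synthesized into a single input path) or the
 * whole path set of the mapping; on failure everything is torn down again. */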
1362 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1363
1364 if (!mapping && !element)
1365 return;
1366
1367 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1368 pa_log_info("Failed to find a working mixer device.");
1369 return;
1370 }
1371
1372 if (element) {
1373
1374 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1375 goto fail;
1376
1377 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1378 goto fail;
1379
1380 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1381 pa_alsa_path_dump(u->mixer_path);
1382 } else {
1383
1384 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
1385 goto fail;
1386
1387 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1388
1389 pa_log_debug("Probed mixer paths:");
1390 pa_alsa_path_set_dump(u->mixer_path_set);
1391 }
1392
1393 return;
1394
1395 fail:
1396
1397 if (u->mixer_path_set) {
1398 pa_alsa_path_set_free(u->mixer_path_set);
1399 u->mixer_path_set = NULL;
1400 } else if (u->mixer_path) {
1401 pa_alsa_path_free(u->mixer_path);
1402 u->mixer_path = NULL;
1403 }
1404
1405 if (u->mixer_handle) {
1406 snd_mixer_close(u->mixer_handle);
1407 u->mixer_handle = NULL;
1408 }
1409 }
1410
1411 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1412 pa_assert(u);
1413
1414 if (!u->mixer_handle)
1415 return 0;
1416
1417 if (u->source->active_port) {
1418 pa_alsa_port_data *data;
1419
1420 /* We have a list of supported paths, so let's activate the
1421 * one that has been chosen as active */
1422
1423 data = PA_DEVICE_PORT_DATA(u->source->active_port);
1424 u->mixer_path = data->path;
1425
1426 pa_alsa_path_select(data->path, u->mixer_handle);
1427
1428 if (data->setting)
1429 pa_alsa_setting_select(data->setting, u->mixer_handle);
1430
1431 } else {
1432
1433 if (!u->mixer_path && u->mixer_path_set)
1434 u->mixer_path = u->mixer_path_set->paths;
1435
1436 if (u->mixer_path) {
1437         /* Hmm, we have only a single path, so let's activate it */
1438
1439 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1440
1441 if (u->mixer_path->settings)
1442 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1443 } else
1444 return 0;
1445 }
1446
1447 if (!u->mixer_path->has_volume)
1448 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1449 else {
1450
1451 if (u->mixer_path->has_dB) {
1452 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1453
1454 u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1455 u->source->n_volume_steps = PA_VOLUME_NORM+1;
1456
1457 if (u->mixer_path->max_dB > 0.0)
1458 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1459 else
1460 pa_log_info("No particular base volume set, fixing to 0 dB");
1461
1462 } else {
1463 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1464 u->source->base_volume = PA_VOLUME_NORM;
1465 u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1466 }
1467
1468 u->source->get_volume = source_get_volume_cb;
1469 u->source->set_volume = source_set_volume_cb;
1470
1471 u->source->flags |= PA_SOURCE_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SOURCE_DECIBEL_VOLUME : 0);
1472 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1473 }
1474
1475 if (!u->mixer_path->has_mute) {
1476 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1477 } else {
1478 u->source->get_mute = source_get_mute_cb;
1479 u->source->set_mute = source_set_mute_cb;
1480 u->source->flags |= PA_SOURCE_HW_MUTE_CTRL;
1481 pa_log_info("Using hardware mute control.");
1482 }
1483
1484 u->mixer_fdl = pa_alsa_fdlist_new();
1485
1486 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1487 pa_log("Failed to initialize file descriptor monitoring");
1488 return -1;
1489 }
1490
1491 if (u->mixer_path_set)
1492 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1493 else
1494 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1495
1496 return 0;
1497 }
1498
1499 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1500
1501 struct userdata *u = NULL;
1502 const char *dev_id = NULL;
1503 pa_sample_spec ss, requested_ss;
1504 pa_channel_map map;
1505 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1506 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1507 size_t frame_size;
1508 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE;
1509 pa_source_new_data data;
1510 pa_alsa_profile_set *profile_set = NULL;
1511
1512 pa_assert(m);
1513 pa_assert(ma);
1514
1515 ss = m->core->default_sample_spec;
1516 map = m->core->default_channel_map;
1517 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1518 pa_log("Failed to parse sample specification");
1519 goto fail;
1520 }
1521
1522 requested_ss = ss;
1523 frame_size = pa_frame_size(&ss);
1524
1525 nfrags = m->core->default_n_fragments;
1526 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1527 if (frag_size <= 0)
1528 frag_size = (uint32_t) frame_size;
1529 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1530 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1531
1532 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1533 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1534 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1535 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1536 pa_log("Failed to parse buffer metrics");
1537 goto fail;
1538 }
1539
1540 buffer_size = nfrags * frag_size;
1541
1542 period_frames = frag_size/frame_size;
1543 buffer_frames = buffer_size/frame_size;
1544 tsched_frames = tsched_size/frame_size;
1545
1546 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1547 pa_log("Failed to parse mmap argument.");
1548 goto fail;
1549 }
1550
1551 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1552 pa_log("Failed to parse timer_scheduling argument.");
1553 goto fail;
1554 }
1555
1556 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1557 pa_log("Failed to parse ignore_dB argument.");
1558 goto fail;
1559 }
1560
1561 use_tsched = pa_alsa_may_tsched(use_tsched);
1562
1563 u = pa_xnew0(struct userdata, 1);
1564 u->core = m->core;
1565 u->module = m;
1566 u->use_mmap = use_mmap;
1567 u->use_tsched = use_tsched;
1568 u->first = TRUE;
1569 u->rtpoll = pa_rtpoll_new();
1570 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1571
1572 u->smoother = pa_smoother_new(
1573 SMOOTHER_ADJUST_USEC,
1574 SMOOTHER_WINDOW_USEC,
1575 TRUE,
1576 TRUE,
1577 5,
1578 pa_rtclock_now(),
1579 TRUE);
1580 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1581
1582 dev_id = pa_modargs_get_value(
1583 ma, "device_id",
1584 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1585
1586 if (reserve_init(u, dev_id) < 0)
1587 goto fail;
1588
1589 if (reserve_monitor_init(u, dev_id) < 0)
1590 goto fail;
1591
1592 b = use_mmap;
1593 d = use_tsched;
1594
1595 if (mapping) {
1596
1597 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1598 pa_log("device_id= not set");
1599 goto fail;
1600 }
1601
1602 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1603 dev_id,
1604 &u->device_name,
1605 &ss, &map,
1606 SND_PCM_STREAM_CAPTURE,
1607 &period_frames, &buffer_frames, tsched_frames,
1608 &b, &d, mapping)))
1609 goto fail;
1610
1611 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1612
1613 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1614 goto fail;
1615
1616 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1617 dev_id,
1618 &u->device_name,
1619 &ss, &map,
1620 SND_PCM_STREAM_CAPTURE,
1621 &period_frames, &buffer_frames, tsched_frames,
1622 &b, &d, profile_set, &mapping)))
1623 goto fail;
1624
1625 } else {
1626
1627 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1628 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1629 &u->device_name,
1630 &ss, &map,
1631 SND_PCM_STREAM_CAPTURE,
1632 &period_frames, &buffer_frames, tsched_frames,
1633 &b, &d, FALSE)))
1634 goto fail;
1635 }
1636
1637 pa_assert(u->device_name);
1638 pa_log_info("Successfully opened device %s.", u->device_name);
1639
1640 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1641 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1642 goto fail;
1643 }
1644
1645 if (mapping)
1646 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1647
1648 if (use_mmap && !b) {
1649 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1650 u->use_mmap = use_mmap = FALSE;
1651 }
1652
1653 if (use_tsched && (!b || !d)) {
1654 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1655 u->use_tsched = use_tsched = FALSE;
1656 }
1657
1658 if (u->use_mmap)
1659 pa_log_info("Successfully enabled mmap() mode.");
1660
1661 if (u->use_tsched)
1662 pa_log_info("Successfully enabled timer-based scheduling mode.");
1663
1664 /* ALSA might tweak the sample spec, so recalculate the frame size */
1665 frame_size = pa_frame_size(&ss);
1666
1667 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1668
1669 pa_source_new_data_init(&data);
1670 data.driver = driver;
1671 data.module = m;
1672 data.card = card;
1673 set_source_name(&data, ma, dev_id, u->device_name, mapping);
1674
1675 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1676 * variable instead of using &data.namereg_fail directly, because
1677 * data.namereg_fail is a bitfield and taking the address of a bitfield
1678 * variable is impossible. */
1679 namereg_fail = data.namereg_fail;
1680 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1681 pa_log("Failed to parse boolean argument namereg_fail.");
1682 pa_source_new_data_done(&data);
1683 goto fail;
1684 }
1685 data.namereg_fail = namereg_fail;
1686
1687 pa_source_new_data_set_sample_spec(&data, &ss);
1688 pa_source_new_data_set_channel_map(&data, &map);
1689
1690 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1691 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1692 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1693 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1694 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1695
1696 if (mapping) {
1697 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1698 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1699 }
1700
1701 pa_alsa_init_description(data.proplist);
1702
1703 if (u->control_device)
1704 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1705
1706 if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1707 pa_log("Invalid properties");
1708 pa_source_new_data_done(&data);
1709 goto fail;
1710 }
1711
1712 if (u->mixer_path_set)
1713 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1714
1715 u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1716 pa_source_new_data_done(&data);
1717
1718 if (!u->source) {
1719 pa_log("Failed to create source object");
1720 goto fail;
1721 }
1722
1723 u->source->parent.process_msg = source_process_msg;
1724 if (u->use_tsched)
1725 u->source->update_requested_latency = source_update_requested_latency_cb;
1726 u->source->set_state = source_set_state_cb;
1727 u->source->set_port = source_set_port_cb;
1728 u->source->userdata = u;
1729
1730 pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1731 pa_source_set_rtpoll(u->source, u->rtpoll);
1732
1733 u->frame_size = frame_size;
1734 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1735 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1736 pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1737
1738 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1739 (double) u->hwbuf_size / (double) u->fragment_size,
1740 (long unsigned) u->fragment_size,
1741 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1742 (long unsigned) u->hwbuf_size,
1743 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1744
1745 if (u->use_tsched) {
1746 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);
1747
1748 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
1749 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
1750
1751 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
1752 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
1753
1754 fix_min_sleep_wakeup(u);
1755 fix_tsched_watermark(u);
1756
1757 pa_source_set_latency_range(u->source,
1758 0,
1759 pa_bytes_to_usec(u->hwbuf_size, &ss));
1760
1761 pa_log_info("Time scheduling watermark is %0.2fms",
1762 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1763 } else
1764 pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
1765
1766 reserve_update(u);
1767
1768 if (update_sw_params(u) < 0)
1769 goto fail;
1770
1771 if (setup_mixer(u, ignore_dB) < 0)
1772 goto fail;
1773
1774 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1775
1776 if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
1777 pa_log("Failed to create thread.");
1778 goto fail;
1779 }
1780 /* Get initial mixer settings */
1781 if (data.volume_is_set) {
1782 if (u->source->set_volume)
1783 u->source->set_volume(u->source);
1784 } else {
1785 if (u->source->get_volume)
1786 u->source->get_volume(u->source);
1787 }
1788
1789 if (data.muted_is_set) {
1790 if (u->source->set_mute)
1791 u->source->set_mute(u->source);
1792 } else {
1793 if (u->source->get_mute)
1794 u->source->get_mute(u->source);
1795 }
1796
1797 pa_source_put(u->source);
1798
1799 if (profile_set)
1800 pa_alsa_profile_set_free(profile_set);
1801
1802 return u->source;
1803
1804 fail:
1805
1806 if (u)
1807 userdata_free(u);
1808
1809 if (profile_set)
1810 pa_alsa_profile_set_free(profile_set);
1811
1812 return NULL;
1813 }
1814
1815 static void userdata_free(struct userdata *u) {
1816 pa_assert(u);
1817
1818 if (u->source)
1819 pa_source_unlink(u->source);
1820
1821 if (u->thread) {
1822 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1823 pa_thread_free(u->thread);
1824 }
1825
1826 pa_thread_mq_done(&u->thread_mq);
1827
1828 if (u->source)
1829 pa_source_unref(u->source);
1830
1831 if (u->alsa_rtpoll_item)
1832 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1833
1834 if (u->rtpoll)
1835 pa_rtpoll_free(u->rtpoll);
1836
1837 if (u->pcm_handle) {
1838 snd_pcm_drop(u->pcm_handle);
1839 snd_pcm_close(u->pcm_handle);
1840 }
1841
1842 if (u->mixer_fdl)
1843 pa_alsa_fdlist_free(u->mixer_fdl);
1844
1845 if (u->mixer_path_set)
1846 pa_alsa_path_set_free(u->mixer_path_set);
1847 else if (u->mixer_path)
1848 pa_alsa_path_free(u->mixer_path);
1849
1850 if (u->mixer_handle)
1851 snd_mixer_close(u->mixer_handle);
1852
1853 if (u->smoother)
1854 pa_smoother_free(u->smoother);
1855
1856 reserve_done(u);
1857 monitor_done(u);
1858
1859 pa_xfree(u->device_name);
1860 pa_xfree(u->control_device);
1861 pa_xfree(u);
1862 }
1863
1864 void pa_alsa_source_free(pa_source *s) {
1865 struct userdata *u;
1866
1867 pa_source_assert_ref(s);
1868 pa_assert_se(u = s->userdata);
1869
1870 userdata_free(u);
1871 }