1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/i18n.h>
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core-error.h>
38 #include <pulsecore/core.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/time-smoother.h>
53
54 #include <modules/reserve-wrap.h>
55
56 #include "alsa-util.h"
57 #include "alsa-source.h"
58
59 /* #define DEBUG_TIMING */
60
61 #define DEFAULT_DEVICE "default"
62
63 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
64 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
65
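/* In timer-based scheduling mode the wakeup watermark is adapted at runtime:
 * it is raised (by at most TSCHED_WATERMARK_INC_STEP_USEC) whenever less than
 * TSCHED_WATERMARK_INC_THRESHOLD_USEC is left before an overrun, and lowered
 * again (by at most TSCHED_WATERMARK_DEC_STEP_USEC) once things have stayed
 * above TSCHED_WATERMARK_DEC_THRESHOLD_USEC for
 * TSCHED_WATERMARK_VERIFY_AFTER_USEC. See check_left_to_record(),
 * increase_watermark() and decrease_watermark() below. */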
66 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
67 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
68 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
69 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (1*PA_USEC_PER_MSEC) /* 1ms */
70 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
71 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
72
73 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
74 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
75
76 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
77 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
78
79 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
80
81 struct userdata {
82 pa_core *core;
83 pa_module *module;
84 pa_source *source;
85
86 pa_thread *thread;
87 pa_thread_mq thread_mq;
88 pa_rtpoll *rtpoll;
89
90 snd_pcm_t *pcm_handle;
91
92 pa_alsa_fdlist *mixer_fdl;
93 snd_mixer_t *mixer_handle;
94 pa_alsa_path_set *mixer_path_set;
95 pa_alsa_path *mixer_path;
96
97 pa_cvolume hardware_volume;
98
99 size_t
100 frame_size,
101 fragment_size,
102 hwbuf_size,
103 tsched_watermark,
104 hwbuf_unused,
105 min_sleep,
106 min_wakeup,
107 watermark_inc_step,
108 watermark_dec_step,
109 watermark_inc_threshold,
110 watermark_dec_threshold;
111
112 pa_usec_t watermark_dec_not_before;
113
114 unsigned nfragments;
115
116 char *device_name;
117 char *control_device;
118
119 pa_bool_t use_mmap:1, use_tsched:1;
120
121 pa_rtpoll_item *alsa_rtpoll_item;
122
123 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
124
125 pa_smoother *smoother;
126 uint64_t read_count;
127 pa_usec_t smoother_interval;
128 pa_usec_t last_smoother_update;
129
130 pa_reserve_wrapper *reserve;
131 pa_hook_slot *reserve_slot;
132 pa_reserve_monitor_wrapper *monitor;
133 pa_hook_slot *monitor_slot;
134 };
135
136 static void userdata_free(struct userdata *u);
137
138 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
139 pa_assert(r);
140 pa_assert(u);
141
142 if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
143 return PA_HOOK_CANCEL;
144
145 return PA_HOOK_OK;
146 }
147
148 static void reserve_done(struct userdata *u) {
149 pa_assert(u);
150
151 if (u->reserve_slot) {
152 pa_hook_slot_free(u->reserve_slot);
153 u->reserve_slot = NULL;
154 }
155
156 if (u->reserve) {
157 pa_reserve_wrapper_unref(u->reserve);
158 u->reserve = NULL;
159 }
160 }
161
162 static void reserve_update(struct userdata *u) {
163 const char *description;
164 pa_assert(u);
165
166 if (!u->source || !u->reserve)
167 return;
168
169 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
170 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
171 }
172
173 static int reserve_init(struct userdata *u, const char *dname) {
174 char *rname;
175
176 pa_assert(u);
177 pa_assert(dname);
178
179 if (u->reserve)
180 return 0;
181
182 if (pa_in_system_mode())
183 return 0;
184
185 /* We are resuming, try to lock the device */
186 if (!(rname = pa_alsa_get_reserve_name(dname)))
187 return 0;
188
189 u->reserve = pa_reserve_wrapper_get(u->core, rname);
190 pa_xfree(rname);
191
192 if (!(u->reserve))
193 return -1;
194
195 reserve_update(u);
196
197 pa_assert(!u->reserve_slot);
198 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
199
200 return 0;
201 }
202
203 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
204 pa_bool_t b;
205
206 pa_assert(w);
207 pa_assert(u);
208
209 b = PA_PTR_TO_UINT(busy) && !u->reserve;
210
211 pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
212 return PA_HOOK_OK;
213 }
214
215 static void monitor_done(struct userdata *u) {
216 pa_assert(u);
217
218 if (u->monitor_slot) {
219 pa_hook_slot_free(u->monitor_slot);
220 u->monitor_slot = NULL;
221 }
222
223 if (u->monitor) {
224 pa_reserve_monitor_wrapper_unref(u->monitor);
225 u->monitor = NULL;
226 }
227 }
228
229 static int reserve_monitor_init(struct userdata *u, const char *dname) {
230 char *rname;
231
232 pa_assert(u);
233 pa_assert(dname);
234
235 if (pa_in_system_mode())
236 return 0;
237
238 /* We are resuming, try to lock the device */
239 if (!(rname = pa_alsa_get_reserve_name(dname)))
240 return 0;
241
242 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
243 pa_xfree(rname);
244
245 if (!(u->monitor))
246 return -1;
247
248 pa_assert(!u->monitor_slot);
249 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
250
251 return 0;
252 }
253
254 static void fix_min_sleep_wakeup(struct userdata *u) {
255 size_t max_use, max_use_2;
256 pa_assert(u);
257 pa_assert(u->use_tsched);
258
259 max_use = u->hwbuf_size - u->hwbuf_unused;
260 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
261
262 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
263 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
264
265 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
266 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
267 }
268
269 static void fix_tsched_watermark(struct userdata *u) {
270 size_t max_use;
271 pa_assert(u);
272 pa_assert(u->use_tsched);
273
274 max_use = u->hwbuf_size - u->hwbuf_unused;
275
276 if (u->tsched_watermark > max_use - u->min_sleep)
277 u->tsched_watermark = max_use - u->min_sleep;
278
279 if (u->tsched_watermark < u->min_wakeup)
280 u->tsched_watermark = u->min_wakeup;
281 }
282
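/* Grow the wakeup watermark after we came too close to an overrun: at most
 * doubling it and at most by watermark_inc_step. If the watermark is already
 * at its maximum, raise the source's minimal latency instead. */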
283 static void increase_watermark(struct userdata *u) {
284 size_t old_watermark;
285 pa_usec_t old_min_latency, new_min_latency;
286
287 pa_assert(u);
288 pa_assert(u->use_tsched);
289
290 /* First, just try to increase the watermark */
291 old_watermark = u->tsched_watermark;
292 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
293 fix_tsched_watermark(u);
294
295 if (old_watermark != u->tsched_watermark) {
296 pa_log_info("Increasing wakeup watermark to %0.2f ms",
297 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
298 return;
299 }
300
301 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
302 old_min_latency = u->source->thread_info.min_latency;
303 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
304 new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
305
306 if (old_min_latency != new_min_latency) {
307 pa_log_info("Increasing minimal latency to %0.2f ms",
308 (double) new_min_latency / PA_USEC_PER_MSEC);
309
310 pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
311 }
312
313 /* When we reach this we're officially fucked! */
314 }
315
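/* Shrink the wakeup watermark again, by at most watermark_dec_step and never
 * below min_wakeup, but only after the device has behaved well for
 * TSCHED_WATERMARK_VERIFY_AFTER_USEC. The configured latency range is left
 * untouched. */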
316 static void decrease_watermark(struct userdata *u) {
317 size_t old_watermark;
318 pa_usec_t now;
319
320 pa_assert(u);
321 pa_assert(u->use_tsched);
322
323 now = pa_rtclock_now();
324
325 if (u->watermark_dec_not_before <= 0)
326 goto restart;
327
328 if (u->watermark_dec_not_before > now)
329 return;
330
331 old_watermark = u->tsched_watermark;
332
333 if (u->tsched_watermark < u->watermark_dec_step)
334 u->tsched_watermark = u->tsched_watermark / 2;
335 else
336 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
337
338 fix_tsched_watermark(u);
339
340 if (old_watermark != u->tsched_watermark)
341 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
342 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
343
344 /* We don't change the latency range */
345
346 restart:
347 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
348 }
349
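/* Split the currently requested latency (or the whole buffer time if none was
 * requested) into the time we may sleep and the processing headroom reserved
 * by the watermark. Returns the latency the split was based on. */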
350 static pa_usec_t hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
351 pa_usec_t wm, usec;
352
353 pa_assert(sleep_usec);
354 pa_assert(process_usec);
355
356 pa_assert(u);
357 pa_assert(u->use_tsched);
358
359 usec = pa_source_get_requested_latency_within_thread(u->source);
360
361 if (usec == (pa_usec_t) -1)
362 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
363
364 wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
365
366 if (wm > usec)
367 wm = usec/2;
368
369 *sleep_usec = usec - wm;
370 *process_usec = wm;
371
372 #ifdef DEBUG_TIMING
373 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
374 (unsigned long) (usec / PA_USEC_PER_MSEC),
375 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
376 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
377 #endif
378
379 return usec;
380 }
381
382 static int try_recover(struct userdata *u, const char *call, int err) {
383 pa_assert(u);
384 pa_assert(call);
385 pa_assert(err < 0);
386
387 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
388
389 pa_assert(err != -EAGAIN);
390
391 if (err == -EPIPE)
392 pa_log_debug("%s: Buffer overrun!", call);
393
394 if (err == -ESTRPIPE)
395 pa_log_debug("%s: System suspended!", call);
396
397 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
398 pa_log("%s: %s", call, pa_alsa_strerror(err));
399 return -1;
400 }
401
402 snd_pcm_start(u->pcm_handle);
403 return 0;
404 }
405
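/* Given how many bytes ALSA reports as readable, compute how much room is
 * left before the hardware buffer overruns and drive the watermark
 * adaptation: increase it when we are within watermark_inc_threshold of an
 * overrun, decrease it when a timer wakeup still finds more than
 * watermark_dec_threshold of room. */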
406 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
407 size_t left_to_record;
408 size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
409
410 /* We use <= instead of < for this check here because an overrun
411 * only happens after the last sample was processed, not already when
412 * it is removed from the buffer. This is particularly important
413 * when block transfer is used. */
414
415 if (n_bytes <= rec_space)
416 left_to_record = rec_space - n_bytes;
417 else {
418
419 /* We got a dropout. What a mess! */
420 left_to_record = 0;
421
422 #ifdef DEBUG_TIMING
423 PA_DEBUG_TRAP;
424 #endif
425
426 if (pa_log_ratelimit())
427 pa_log_info("Overrun!");
428 }
429
430 #ifdef DEBUG_TIMING
431 pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
432 #endif
433
434 if (u->use_tsched) {
435 pa_bool_t reset_not_before = TRUE;
436
437 if (left_to_record < u->watermark_inc_threshold)
438 increase_watermark(u);
439 else if (left_to_record > u->watermark_dec_threshold) {
440 reset_not_before = FALSE;
441
442 /* We decrease the watermark only if we have actually been
443 * woken up by a timeout. If something else woke us up
444 * it's too easy to fulfill the deadlines... */
445
446 if (on_timeout)
447 decrease_watermark(u);
448 }
449
450 if (reset_not_before)
451 u->watermark_dec_not_before = 0;
452 }
453
454 return left_to_record;
455 }
456
457 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
458 pa_bool_t work_done = FALSE;
459 pa_usec_t max_sleep_usec = 0, process_usec = 0;
460 size_t left_to_record;
461 unsigned j = 0;
462
463 pa_assert(u);
464 pa_source_assert_ref(u->source);
465
466 if (u->use_tsched)
467 hw_sleep_time(u, &max_sleep_usec, &process_usec);
468
469 for (;;) {
470 snd_pcm_sframes_t n;
471 size_t n_bytes;
472 int r;
473 pa_bool_t after_avail = TRUE;
474
475 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
476
477 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
478 continue;
479
480 return r;
481 }
482
483 n_bytes = (size_t) n * u->frame_size;
484
485 #ifdef DEBUG_TIMING
486 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
487 #endif
488
489 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
490 on_timeout = FALSE;
491
492 if (u->use_tsched)
493 if (!polled &&
494 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
495 #ifdef DEBUG_TIMING
496 pa_log_debug("Not reading, because too early.");
497 #endif
498 break;
499 }
500
501 if (PA_UNLIKELY(n_bytes <= 0)) {
502
503 if (polled)
504 PA_ONCE_BEGIN {
505 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
506 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
507 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
508 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
509 pa_strnull(dn));
510 pa_xfree(dn);
511 } PA_ONCE_END;
512
513 #ifdef DEBUG_TIMING
514 pa_log_debug("Not reading, because not necessary.");
515 #endif
516 break;
517 }
518
519 if (++j > 10) {
520 #ifdef DEBUG_TIMING
521 pa_log_debug("Not filling up, because already too many iterations.");
522 #endif
523
524 break;
525 }
526
527 polled = FALSE;
528
529 #ifdef DEBUG_TIMING
530 pa_log_debug("Reading");
531 #endif
532
533 for (;;) {
534 int err;
535 const snd_pcm_channel_area_t *areas;
536 snd_pcm_uframes_t offset, frames;
537 pa_memchunk chunk;
538 void *p;
539 snd_pcm_sframes_t sframes;
540
541 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
542
543 /* pa_log_debug("%lu frames to read", (unsigned long) frames); */
544
545 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
546
547 if (!after_avail && err == -EAGAIN)
548 break;
549
550 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
551 continue;
552
553 return r;
554 }
555
556 /* Make sure that if these memblocks need to be copied they will fit into one slot */
557 if (frames > pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size)
558 frames = pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size;
559
560 if (!after_avail && frames == 0)
561 break;
562
563 pa_assert(frames > 0);
564 after_avail = FALSE;
565
566 /* Check these are multiples of 8 bit */
567 pa_assert((areas[0].first & 7) == 0);
568 pa_assert((areas[0].step & 7) == 0);
569
570 /* We assume a single interleaved memory buffer */
571 pa_assert((areas[0].first >> 3) == 0);
572 pa_assert((areas[0].step >> 3) == u->frame_size);
573
574 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
575
576 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
577 chunk.length = pa_memblock_get_length(chunk.memblock);
578 chunk.index = 0;
579
580 pa_source_post(u->source, &chunk);
581 pa_memblock_unref_fixed(chunk.memblock);
582
583 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
584
585 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
586 continue;
587
588 return r;
589 }
590
591 work_done = TRUE;
592
593 u->read_count += frames * u->frame_size;
594
595 #ifdef DEBUG_TIMING
596 pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
597 #endif
598
599 if ((size_t) frames * u->frame_size >= n_bytes)
600 break;
601
602 n_bytes -= (size_t) frames * u->frame_size;
603 }
604 }
605
606 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
607
608 if (*sleep_usec > process_usec)
609 *sleep_usec -= process_usec;
610 else
611 *sleep_usec = 0;
612
613 return work_done ? 1 : 0;
614 }
615
616 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
617 int work_done = FALSE;
618 pa_usec_t max_sleep_usec = 0, process_usec = 0;
619 size_t left_to_record;
620 unsigned j = 0;
621
622 pa_assert(u);
623 pa_source_assert_ref(u->source);
624
625 if (u->use_tsched)
626 hw_sleep_time(u, &max_sleep_usec, &process_usec);
627
628 for (;;) {
629 snd_pcm_sframes_t n;
630 size_t n_bytes;
631 int r;
632 pa_bool_t after_avail = TRUE;
633
634 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
635
636 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
637 continue;
638
639 return r;
640 }
641
642 n_bytes = (size_t) n * u->frame_size;
643 left_to_record = check_left_to_record(u, n_bytes, on_timeout);
644 on_timeout = FALSE;
645
646 if (u->use_tsched)
647 if (!polled &&
648 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
649 break;
650
651 if (PA_UNLIKELY(n_bytes <= 0)) {
652
653 if (polled)
654 PA_ONCE_BEGIN {
655 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
656 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
657 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
658 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
659 pa_strnull(dn));
660 pa_xfree(dn);
661 } PA_ONCE_END;
662
663 break;
664 }
665
666 if (++j > 10) {
667 #ifdef DEBUG_TIMING
668 pa_log_debug("Not filling up, because already too many iterations.");
669 #endif
670
671 break;
672 }
673
674 polled = FALSE;
675
676 for (;;) {
677 void *p;
678 snd_pcm_sframes_t frames;
679 pa_memchunk chunk;
680
681 chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
682
683 frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
684
685 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
686 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
687
688 /* pa_log_debug("%lu frames to read", (unsigned long) n); */
689
690 p = pa_memblock_acquire(chunk.memblock);
691 frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
692 pa_memblock_release(chunk.memblock);
693
694 if (PA_UNLIKELY(frames < 0)) {
695 pa_memblock_unref(chunk.memblock);
696
697 if (!after_avail && (int) frames == -EAGAIN)
698 break;
699
700 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
701 continue;
702
703 return r;
704 }
705
706 if (!after_avail && frames == 0) {
707 pa_memblock_unref(chunk.memblock);
708 break;
709 }
710
711 pa_assert(frames > 0);
712 after_avail = FALSE;
713
714 chunk.index = 0;
715 chunk.length = (size_t) frames * u->frame_size;
716
717 pa_source_post(u->source, &chunk);
718 pa_memblock_unref(chunk.memblock);
719
720 work_done = TRUE;
721
722 u->read_count += frames * u->frame_size;
723
724 /* pa_log_debug("read %lu frames", (unsigned long) frames); */
725
726 if ((size_t) frames * u->frame_size >= n_bytes)
727 break;
728
729 n_bytes -= (size_t) frames * u->frame_size;
730 }
731 }
732
733 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
734
735 if (*sleep_usec > process_usec)
736 *sleep_usec -= process_usec;
737 else
738 *sleep_usec = 0;
739
740 return work_done ? 1 : 0;
741 }
742
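/* Feed a new (system time, sound card time) sample into the time smoother,
 * derived from the current PCM delay and the number of bytes read so far.
 * Updates are rate-limited by smoother_interval, which doubles after each
 * update up to SMOOTHER_MAX_INTERVAL. */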
743 static void update_smoother(struct userdata *u) {
744 snd_pcm_sframes_t delay = 0;
745 uint64_t position;
746 int err;
747 pa_usec_t now1 = 0, now2;
748 snd_pcm_status_t *status;
749
750 snd_pcm_status_alloca(&status);
751
752 pa_assert(u);
753 pa_assert(u->pcm_handle);
754
755 /* Let's update the time smoother */
756
757 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
758 pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
759 return;
760 }
761
762 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
763 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
764 else {
765 snd_htimestamp_t htstamp = { 0, 0 };
766 snd_pcm_status_get_htstamp(status, &htstamp);
767 now1 = pa_timespec_load(&htstamp);
768 }
769
770 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
771 if (now1 <= 0)
772 now1 = pa_rtclock_now();
773
774 /* check if the time since the last update is bigger than the interval */
775 if (u->last_smoother_update > 0)
776 if (u->last_smoother_update + u->smoother_interval > now1)
777 return;
778
779 position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
780 now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
781
782 pa_smoother_put(u->smoother, now1, now2);
783
784 u->last_smoother_update = now1;
785 /* exponentially increase the update interval up to the MAX limit */
786 u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
787 }
788
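/* Estimate the current capture latency: the smoothed sound card time minus
 * the stream time corresponding to the bytes we have already read, clamped
 * to zero. */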
789 static pa_usec_t source_get_latency(struct userdata *u) {
790 int64_t delay;
791 pa_usec_t now1, now2;
792
793 pa_assert(u);
794
795 now1 = pa_rtclock_now();
796 now2 = pa_smoother_get(u->smoother, now1);
797
798 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
799
800 return delay >= 0 ? (pa_usec_t) delay : 0;
801 }
802
803 static int build_pollfd(struct userdata *u) {
804 pa_assert(u);
805 pa_assert(u->pcm_handle);
806
807 if (u->alsa_rtpoll_item)
808 pa_rtpoll_item_free(u->alsa_rtpoll_item);
809
810 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
811 return -1;
812
813 return 0;
814 }
815
816 static int suspend(struct userdata *u) {
817 pa_assert(u);
818 pa_assert(u->pcm_handle);
819
820 pa_smoother_pause(u->smoother, pa_rtclock_now());
821
822 /* Let's suspend */
823 snd_pcm_close(u->pcm_handle);
824 u->pcm_handle = NULL;
825
826 if (u->alsa_rtpoll_item) {
827 pa_rtpoll_item_free(u->alsa_rtpoll_item);
828 u->alsa_rtpoll_item = NULL;
829 }
830
831 pa_log_info("Device suspended...");
832
833 return 0;
834 }
835
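/* Recompute how much of the hardware buffer to leave unused for the currently
 * requested latency, refit the sleep/watermark values and program ALSA's
 * avail_min accordingly. */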
836 static int update_sw_params(struct userdata *u) {
837 snd_pcm_uframes_t avail_min;
838 int err;
839
840 pa_assert(u);
841
842 /* Use the full buffer if no one asked us for anything specific */
843 u->hwbuf_unused = 0;
844
845 if (u->use_tsched) {
846 pa_usec_t latency;
847
848 if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
849 size_t b;
850
851 pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
852
853 b = pa_usec_to_bytes(latency, &u->source->sample_spec);
854
855 /* We need at least one sample in our buffer */
856
857 if (PA_UNLIKELY(b < u->frame_size))
858 b = u->frame_size;
859
860 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
861 }
862
863 fix_min_sleep_wakeup(u);
864 fix_tsched_watermark(u);
865 }
866
867 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
868
869 avail_min = 1;
870
871 if (u->use_tsched) {
872 pa_usec_t sleep_usec, process_usec;
873
874 hw_sleep_time(u, &sleep_usec, &process_usec);
875 avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
876 }
877
878 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
879
880 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
881 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
882 return err;
883 }
884
885 return 0;
886 }
887
888 static int unsuspend(struct userdata *u) {
889 pa_sample_spec ss;
890 int err;
891 pa_bool_t b, d;
892 unsigned nfrags;
893 snd_pcm_uframes_t period_size;
894
895 pa_assert(u);
896 pa_assert(!u->pcm_handle);
897
898 pa_log_info("Trying resume...");
899
900 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
901 /*SND_PCM_NONBLOCK|*/
902 SND_PCM_NO_AUTO_RESAMPLE|
903 SND_PCM_NO_AUTO_CHANNELS|
904 SND_PCM_NO_AUTO_FORMAT)) < 0) {
905 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
906 goto fail;
907 }
908
909 ss = u->source->sample_spec;
910 nfrags = u->nfragments;
911 period_size = u->fragment_size / u->frame_size;
912 b = u->use_mmap;
913 d = u->use_tsched;
914
915 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
916 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
917 goto fail;
918 }
919
920 if (b != u->use_mmap || d != u->use_tsched) {
921 pa_log_warn("Resume failed, couldn't get original access mode.");
922 goto fail;
923 }
924
925 if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
926 pa_log_warn("Resume failed, couldn't restore original sample settings.");
927 goto fail;
928 }
929
930 if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
931 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
932 (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
933 (unsigned long) nfrags, period_size * u->frame_size);
934 goto fail;
935 }
936
937 if (update_sw_params(u) < 0)
938 goto fail;
939
940 if (build_pollfd(u) < 0)
941 goto fail;
942
943 /* FIXME: We need to reload the volume somehow */
944
945 snd_pcm_start(u->pcm_handle);
946
947 u->read_count = 0;
948 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
949 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
950 u->last_smoother_update = 0;
951
952 pa_log_info("Resumed successfully...");
953
954 return 0;
955
956 fail:
957 if (u->pcm_handle) {
958 snd_pcm_close(u->pcm_handle);
959 u->pcm_handle = NULL;
960 }
961
962 return -1;
963 }
964
965 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
966 struct userdata *u = PA_SOURCE(o)->userdata;
967
968 switch (code) {
969
970 case PA_SOURCE_MESSAGE_GET_LATENCY: {
971 pa_usec_t r = 0;
972
973 if (u->pcm_handle)
974 r = source_get_latency(u);
975
976 *((pa_usec_t*) data) = r;
977
978 return 0;
979 }
980
981 case PA_SOURCE_MESSAGE_SET_STATE:
982
983 switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
984
985 case PA_SOURCE_SUSPENDED:
986 pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
987
988 if (suspend(u) < 0)
989 return -1;
990
991 break;
992
993 case PA_SOURCE_IDLE:
994 case PA_SOURCE_RUNNING:
995
996 if (u->source->thread_info.state == PA_SOURCE_INIT) {
997 if (build_pollfd(u) < 0)
998 return -1;
999
1000 snd_pcm_start(u->pcm_handle);
1001 }
1002
1003 if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1004 if (unsuspend(u) < 0)
1005 return -1;
1006 }
1007
1008 break;
1009
1010 case PA_SOURCE_UNLINKED:
1011 case PA_SOURCE_INIT:
1012 case PA_SOURCE_INVALID_STATE:
1013 ;
1014 }
1015
1016 break;
1017 }
1018
1019 return pa_source_process_msg(o, code, data, offset, chunk);
1020 }
1021
1022 /* Called from main context */
1023 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1024 pa_source_state_t old_state;
1025 struct userdata *u;
1026
1027 pa_source_assert_ref(s);
1028 pa_assert_se(u = s->userdata);
1029
1030 old_state = pa_source_get_state(u->source);
1031
1032 if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1033 reserve_done(u);
1034 else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1035 if (reserve_init(u, u->device_name) < 0)
1036 return -1;
1037
1038 return 0;
1039 }
1040
1041 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1042 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1043
1044 pa_assert(u);
1045 pa_assert(u->mixer_handle);
1046
1047 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1048 return 0;
1049
1050 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1051 pa_source_get_volume(u->source, TRUE);
1052 pa_source_get_mute(u->source, TRUE);
1053 }
1054
1055 return 0;
1056 }
1057
1058 static void source_get_volume_cb(pa_source *s) {
1059 struct userdata *u = s->userdata;
1060 pa_cvolume r;
1061 char t[PA_CVOLUME_SNPRINT_MAX];
1062
1063 pa_assert(u);
1064 pa_assert(u->mixer_path);
1065 pa_assert(u->mixer_handle);
1066
1067 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1068 return;
1069
1070 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1071 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1072
1073 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1074
1075 if (pa_cvolume_equal(&u->hardware_volume, &r))
1076 return;
1077
1078 s->volume = u->hardware_volume = r;
1079
1080 /* Hmm, so the hardware volume changed, let's reset our software volume */
1081 if (u->mixer_path->has_dB)
1082 pa_source_set_soft_volume(s, NULL);
1083 }
1084
1085 static void source_set_volume_cb(pa_source *s) {
1086 struct userdata *u = s->userdata;
1087 pa_cvolume r;
1088 char t[PA_CVOLUME_SNPRINT_MAX];
1089
1090 pa_assert(u);
1091 pa_assert(u->mixer_path);
1092 pa_assert(u->mixer_handle);
1093
1094 /* Shift up by the base volume */
1095 pa_sw_cvolume_divide_scalar(&r, &s->volume, s->base_volume);
1096
1097 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1098 return;
1099
1100 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1101 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1102
1103 u->hardware_volume = r;
1104
1105 if (u->mixer_path->has_dB) {
1106 pa_cvolume new_soft_volume;
1107 pa_bool_t accurate_enough;
1108
1109 /* Match exactly what the user requested by software */
1110 pa_sw_cvolume_divide(&new_soft_volume, &s->volume, &u->hardware_volume);
1111
1112 /* If the adjustment to do in software is only minimal we
1113 * can skip it. That saves us CPU at the expense of a bit of
1114 * accuracy */
1115 accurate_enough =
1116 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1117 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1118
1119 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->volume));
1120 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
1121 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
1122 pa_yes_no(accurate_enough));
1123
1124 if (!accurate_enough)
1125 s->soft_volume = new_soft_volume;
1126
1127 } else {
1128 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1129
1130 /* We can't match exactly what the user requested, hence let's
1131 * at least tell the user about it */
1132
1133 s->volume = r;
1134 }
1135 }
1136
1137 static void source_get_mute_cb(pa_source *s) {
1138 struct userdata *u = s->userdata;
1139 pa_bool_t b;
1140
1141 pa_assert(u);
1142 pa_assert(u->mixer_path);
1143 pa_assert(u->mixer_handle);
1144
1145 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1146 return;
1147
1148 s->muted = b;
1149 }
1150
1151 static void source_set_mute_cb(pa_source *s) {
1152 struct userdata *u = s->userdata;
1153
1154 pa_assert(u);
1155 pa_assert(u->mixer_path);
1156 pa_assert(u->mixer_handle);
1157
1158 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1159 }
1160
1161 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1162 struct userdata *u = s->userdata;
1163 pa_alsa_port_data *data;
1164
1165 pa_assert(u);
1166 pa_assert(p);
1167 pa_assert(u->mixer_handle);
1168
1169 data = PA_DEVICE_PORT_DATA(p);
1170
1171 pa_assert_se(u->mixer_path = data->path);
1172 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1173
1174 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1175 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1176 s->n_volume_steps = PA_VOLUME_NORM+1;
1177
1178 if (u->mixer_path->max_dB > 0.0)
1179 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1180 else
1181 pa_log_info("No particular base volume set, fixing to 0 dB");
1182 } else {
1183 s->base_volume = PA_VOLUME_NORM;
1184 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1185 }
1186
1187 if (data->setting)
1188 pa_alsa_setting_select(data->setting, u->mixer_handle);
1189
1190 if (s->set_mute)
1191 s->set_mute(s);
1192 if (s->set_volume)
1193 s->set_volume(s);
1194
1195 return 0;
1196 }
1197
1198 static void source_update_requested_latency_cb(pa_source *s) {
1199 struct userdata *u = s->userdata;
1200 pa_assert(u);
1201
1202 if (!u->pcm_handle)
1203 return;
1204
1205 update_sw_params(u);
1206 }
1207
1208 static void thread_func(void *userdata) {
1209 struct userdata *u = userdata;
1210 unsigned short revents = 0;
1211
1212 pa_assert(u);
1213
1214 pa_log_debug("Thread starting up");
1215
1216 if (u->core->realtime_scheduling)
1217 pa_make_realtime(u->core->realtime_priority);
1218
1219 pa_thread_mq_install(&u->thread_mq);
1220
1221 for (;;) {
1222 int ret;
1223
1224 #ifdef DEBUG_TIMING
1225 pa_log_debug("Loop");
1226 #endif
1227
1228 /* Read some data and pass it to the sources */
1229 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1230 int work_done;
1231 pa_usec_t sleep_usec = 0;
1232 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1233
1234 if (u->use_mmap)
1235 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1236 else
1237 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1238
1239 if (work_done < 0)
1240 goto fail;
1241
1242 /* pa_log_debug("work_done = %i", work_done); */
1243
1244 if (work_done)
1245 update_smoother(u);
1246
1247 if (u->use_tsched) {
1248 pa_usec_t cusec;
1249
1250 /* OK, the capture buffer is now empty, let's
1251 * calculate when to wake up next */
1252
1253 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1254
1255 /* Convert from the sound card time domain to the
1256 * system time domain */
1257 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1258
1259 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1260
1261 /* We don't trust the conversion, so we wake up whatever comes first */
1262 pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
1263 }
1264 } else if (u->use_tsched)
1265
1266 /* OK, we're in an invalid state, let's disable our timers */
1267 pa_rtpoll_set_timer_disabled(u->rtpoll);
1268
1269 /* Hmm, nothing to do. Let's sleep */
1270 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1271 goto fail;
1272
1273 if (ret == 0)
1274 goto finish;
1275
1276 /* Tell ALSA about this and process its response */
1277 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1278 struct pollfd *pollfd;
1279 int err;
1280 unsigned n;
1281
1282 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1283
1284 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1285 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1286 goto fail;
1287 }
1288
1289 if (revents & ~POLLIN) {
1290 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1291 goto fail;
1292
1293 snd_pcm_start(u->pcm_handle);
1294 } else if (revents && u->use_tsched && pa_log_ratelimit())
1295 pa_log_debug("Wakeup from ALSA!");
1296
1297 } else
1298 revents = 0;
1299 }
1300
1301 fail:
1302 /* If this was not a regular exit from the loop we have to continue
1303 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1304 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1305 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1306
1307 finish:
1308 pa_log_debug("Thread shutting down");
1309 }
1310
1311 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1312 const char *n;
1313 char *t;
1314
1315 pa_assert(data);
1316 pa_assert(ma);
1317 pa_assert(device_name);
1318
1319 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1320 pa_source_new_data_set_name(data, n);
1321 data->namereg_fail = TRUE;
1322 return;
1323 }
1324
1325 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1326 data->namereg_fail = TRUE;
1327 else {
1328 n = device_id ? device_id : device_name;
1329 data->namereg_fail = FALSE;
1330 }
1331
1332 if (mapping)
1333 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1334 else
1335 t = pa_sprintf_malloc("alsa_input.%s", n);
1336
1337 pa_source_new_data_set_name(data, t);
1338 pa_xfree(t);
1339 }
1340
1341 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1342
1343 if (!mapping && !element)
1344 return;
1345
1346 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1347 pa_log_info("Failed to find a working mixer device.");
1348 return;
1349 }
1350
1351 if (element) {
1352
1353 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1354 goto fail;
1355
1356 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1357 goto fail;
1358
1359 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1360 pa_alsa_path_dump(u->mixer_path);
1361 } else {
1362
1363 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
1364 goto fail;
1365
1366 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1367
1368 pa_log_debug("Probed mixer paths:");
1369 pa_alsa_path_set_dump(u->mixer_path_set);
1370 }
1371
1372 return;
1373
1374 fail:
1375
1376 if (u->mixer_path_set) {
1377 pa_alsa_path_set_free(u->mixer_path_set);
1378 u->mixer_path_set = NULL;
1379 } else if (u->mixer_path) {
1380 pa_alsa_path_free(u->mixer_path);
1381 u->mixer_path = NULL;
1382 }
1383
1384 if (u->mixer_handle) {
1385 snd_mixer_close(u->mixer_handle);
1386 u->mixer_handle = NULL;
1387 }
1388 }
1389
1390 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1391 pa_assert(u);
1392
1393 if (!u->mixer_handle)
1394 return 0;
1395
1396 if (u->source->active_port) {
1397 pa_alsa_port_data *data;
1398
1399 /* We have a list of supported paths, so let's activate the
1400 * one that has been chosen as active */
1401
1402 data = PA_DEVICE_PORT_DATA(u->source->active_port);
1403 u->mixer_path = data->path;
1404
1405 pa_alsa_path_select(data->path, u->mixer_handle);
1406
1407 if (data->setting)
1408 pa_alsa_setting_select(data->setting, u->mixer_handle);
1409
1410 } else {
1411
1412 if (!u->mixer_path && u->mixer_path_set)
1413 u->mixer_path = u->mixer_path_set->paths;
1414
1415 if (u->mixer_path) {
1416 /* Hmm, we only have a single path, so let's activate it */
1417
1418 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1419
1420 if (u->mixer_path->settings)
1421 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1422 } else
1423 return 0;
1424 }
1425
1426 if (!u->mixer_path->has_volume)
1427 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1428 else {
1429
1430 if (u->mixer_path->has_dB) {
1431 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1432
1433 u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1434 u->source->n_volume_steps = PA_VOLUME_NORM+1;
1435
1436 if (u->mixer_path->max_dB > 0.0)
1437 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1438 else
1439 pa_log_info("No particular base volume set, fixing to 0 dB");
1440
1441 } else {
1442 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1443 u->source->base_volume = PA_VOLUME_NORM;
1444 u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1445 }
1446
1447 u->source->get_volume = source_get_volume_cb;
1448 u->source->set_volume = source_set_volume_cb;
1449
1450 u->source->flags |= PA_SOURCE_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SOURCE_DECIBEL_VOLUME : 0);
1451 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1452 }
1453
1454 if (!u->mixer_path->has_mute) {
1455 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1456 } else {
1457 u->source->get_mute = source_get_mute_cb;
1458 u->source->set_mute = source_set_mute_cb;
1459 u->source->flags |= PA_SOURCE_HW_MUTE_CTRL;
1460 pa_log_info("Using hardware mute control.");
1461 }
1462
1463 u->mixer_fdl = pa_alsa_fdlist_new();
1464
1465 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1466 pa_log("Failed to initialize file descriptor monitoring");
1467 return -1;
1468 }
1469
1470 if (u->mixer_path_set)
1471 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1472 else
1473 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1474
1475 return 0;
1476 }
1477
1478 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
1479
1480 struct userdata *u = NULL;
1481 const char *dev_id = NULL;
1482 pa_sample_spec ss, requested_ss;
1483 pa_channel_map map;
1484 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1485 snd_pcm_uframes_t period_frames, tsched_frames;
1486 size_t frame_size;
1487 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
1488 pa_source_new_data data;
1489 pa_alsa_profile_set *profile_set = NULL;
1490
1491 pa_assert(m);
1492 pa_assert(ma);
1493
1494 ss = m->core->default_sample_spec;
1495 map = m->core->default_channel_map;
1496 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1497 pa_log("Failed to parse sample specification");
1498 goto fail;
1499 }
1500
1501 requested_ss = ss;
1502 frame_size = pa_frame_size(&ss);
1503
1504 nfrags = m->core->default_n_fragments;
1505 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1506 if (frag_size <= 0)
1507 frag_size = (uint32_t) frame_size;
1508 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1509 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1510
1511 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1512 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1513 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1514 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1515 pa_log("Failed to parse buffer metrics");
1516 goto fail;
1517 }
1518
1519 hwbuf_size = frag_size * nfrags;
1520 period_frames = frag_size/frame_size;
1521 tsched_frames = tsched_size/frame_size;
1522
1523 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1524 pa_log("Failed to parse mmap argument.");
1525 goto fail;
1526 }
1527
1528 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1529 pa_log("Failed to parse timer_scheduling argument.");
1530 goto fail;
1531 }
1532
1533 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1534 pa_log("Failed to parse ignore_dB argument.");
1535 goto fail;
1536 }
1537
1538 if (use_tsched && !pa_rtclock_hrtimer()) {
1539 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1540 use_tsched = FALSE;
1541 }
1542
1543 u = pa_xnew0(struct userdata, 1);
1544 u->core = m->core;
1545 u->module = m;
1546 u->use_mmap = use_mmap;
1547 u->use_tsched = use_tsched;
1548 u->rtpoll = pa_rtpoll_new();
1549 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1550
1551 u->smoother = pa_smoother_new(
1552 DEFAULT_TSCHED_WATERMARK_USEC*2,
1553 DEFAULT_TSCHED_WATERMARK_USEC*2,
1554 TRUE,
1555 TRUE,
1556 5,
1557 pa_rtclock_now(),
1558 FALSE);
1559 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1560
1561 dev_id = pa_modargs_get_value(
1562 ma, "device_id",
1563 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1564
1565 if (reserve_init(u, dev_id) < 0)
1566 goto fail;
1567
1568 if (reserve_monitor_init(u, dev_id) < 0)
1569 goto fail;
1570
1571 b = use_mmap;
1572 d = use_tsched;
1573
1574 if (mapping) {
1575
1576 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1577 pa_log("device_id= not set");
1578 goto fail;
1579 }
1580
1581 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1582 dev_id,
1583 &u->device_name,
1584 &ss, &map,
1585 SND_PCM_STREAM_CAPTURE,
1586 &nfrags, &period_frames, tsched_frames,
1587 &b, &d, mapping)))
1588 goto fail;
1589
1590 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1591
1592 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1593 goto fail;
1594
1595 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1596 dev_id,
1597 &u->device_name,
1598 &ss, &map,
1599 SND_PCM_STREAM_CAPTURE,
1600 &nfrags, &period_frames, tsched_frames,
1601 &b, &d, profile_set, &mapping)))
1602 goto fail;
1603
1604 } else {
1605
1606 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1607 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1608 &u->device_name,
1609 &ss, &map,
1610 SND_PCM_STREAM_CAPTURE,
1611 &nfrags, &period_frames, tsched_frames,
1612 &b, &d, FALSE)))
1613 goto fail;
1614 }
1615
1616 pa_assert(u->device_name);
1617 pa_log_info("Successfully opened device %s.", u->device_name);
1618
1619 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1620 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1621 goto fail;
1622 }
1623
1624 if (mapping)
1625 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1626
1627 if (use_mmap && !b) {
1628 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1629 u->use_mmap = use_mmap = FALSE;
1630 }
1631
1632 if (use_tsched && (!b || !d)) {
1633 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1634 u->use_tsched = use_tsched = FALSE;
1635 }
1636
1637 if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
1638 pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
1639 u->use_tsched = use_tsched = FALSE;
1640 }
1641
1642 if (u->use_mmap)
1643 pa_log_info("Successfully enabled mmap() mode.");
1644
1645 if (u->use_tsched)
1646 pa_log_info("Successfully enabled timer-based scheduling mode.");
1647
1648 /* ALSA might tweak the sample spec, so recalculate the frame size */
1649 frame_size = pa_frame_size(&ss);
1650
1651 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1652
1653 pa_source_new_data_init(&data);
1654 data.driver = driver;
1655 data.module = m;
1656 data.card = card;
1657 set_source_name(&data, ma, dev_id, u->device_name, mapping);
1658 pa_source_new_data_set_sample_spec(&data, &ss);
1659 pa_source_new_data_set_channel_map(&data, &map);
1660
1661 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1662 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1663 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1664 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1665 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1666
1667 if (mapping) {
1668 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1669 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1670 }
1671
1672 pa_alsa_init_description(data.proplist);
1673
1674 if (u->control_device)
1675 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1676
1677 if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1678 pa_log("Invalid properties");
1679 pa_source_new_data_done(&data);
1680 goto fail;
1681 }
1682
1683 if (u->mixer_path_set)
1684 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1685
1686 u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1687 pa_source_new_data_done(&data);
1688
1689 if (!u->source) {
1690 pa_log("Failed to create source object");
1691 goto fail;
1692 }
1693
1694 u->source->parent.process_msg = source_process_msg;
1695 u->source->update_requested_latency = source_update_requested_latency_cb;
1696 u->source->set_state = source_set_state_cb;
1697 u->source->set_port = source_set_port_cb;
1698 u->source->userdata = u;
1699
1700 pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1701 pa_source_set_rtpoll(u->source, u->rtpoll);
1702
1703 u->frame_size = frame_size;
1704 u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
1705 u->nfragments = nfrags;
1706 u->hwbuf_size = u->fragment_size * nfrags;
1707 pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1708
1709 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1710 nfrags, (long unsigned) u->fragment_size,
1711 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1712
1713 if (u->use_tsched) {
1714 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);
1715
1716 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
1717 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
1718
1719 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
1720 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
1721
1722 fix_min_sleep_wakeup(u);
1723 fix_tsched_watermark(u);
1724
1725 pa_source_set_latency_range(u->source,
1726 0,
1727 pa_bytes_to_usec(u->hwbuf_size, &ss));
1728
1729 pa_log_info("Time scheduling watermark is %0.2fms",
1730 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1731 } else
1732 pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
1733
1734 reserve_update(u);
1735
1736 if (update_sw_params(u) < 0)
1737 goto fail;
1738
1739 if (setup_mixer(u, ignore_dB) < 0)
1740 goto fail;
1741
1742 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1743
1744 if (!(u->thread = pa_thread_new(thread_func, u))) {
1745 pa_log("Failed to create thread.");
1746 goto fail;
1747 }
1748 /* Get initial mixer settings */
1749 if (data.volume_is_set) {
1750 if (u->source->set_volume)
1751 u->source->set_volume(u->source);
1752 } else {
1753 if (u->source->get_volume)
1754 u->source->get_volume(u->source);
1755 }
1756
1757 if (data.muted_is_set) {
1758 if (u->source->set_mute)
1759 u->source->set_mute(u->source);
1760 } else {
1761 if (u->source->get_mute)
1762 u->source->get_mute(u->source);
1763 }
1764
1765 pa_source_put(u->source);
1766
1767 if (profile_set)
1768 pa_alsa_profile_set_free(profile_set);
1769
1770 return u->source;
1771
1772 fail:
1773
1774 if (u)
1775 userdata_free(u);
1776
1777 if (profile_set)
1778 pa_alsa_profile_set_free(profile_set);
1779
1780 return NULL;
1781 }
1782
1783 static void userdata_free(struct userdata *u) {
1784 pa_assert(u);
1785
1786 if (u->source)
1787 pa_source_unlink(u->source);
1788
1789 if (u->thread) {
1790 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1791 pa_thread_free(u->thread);
1792 }
1793
1794 pa_thread_mq_done(&u->thread_mq);
1795
1796 if (u->source)
1797 pa_source_unref(u->source);
1798
1799 if (u->alsa_rtpoll_item)
1800 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1801
1802 if (u->rtpoll)
1803 pa_rtpoll_free(u->rtpoll);
1804
1805 if (u->pcm_handle) {
1806 snd_pcm_drop(u->pcm_handle);
1807 snd_pcm_close(u->pcm_handle);
1808 }
1809
1810 if (u->mixer_fdl)
1811 pa_alsa_fdlist_free(u->mixer_fdl);
1812
1813 if (u->mixer_path_set)
1814 pa_alsa_path_set_free(u->mixer_path_set);
1815 else if (u->mixer_path)
1816 pa_alsa_path_free(u->mixer_path);
1817
1818 if (u->mixer_handle)
1819 snd_mixer_close(u->mixer_handle);
1820
1821 if (u->smoother)
1822 pa_smoother_free(u->smoother);
1823
1824 reserve_done(u);
1825 monitor_done(u);
1826
1827 pa_xfree(u->device_name);
1828 pa_xfree(u->control_device);
1829 pa_xfree(u);
1830 }
1831
1832 void pa_alsa_source_free(pa_source *s) {
1833 struct userdata *u;
1834
1835 pa_source_assert_ref(s);
1836 pa_assert_se(u = s->userdata);
1837
1838 userdata_free(u);
1839 }