/* code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-source.c
 * commit 77dbb8651d3308db35c75c8f2ba8cd0fa65c08c8
 * [pulseaudio] / src / modules / alsa / alsa-source.c */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <signal.h>
28 #include <stdio.h>
29
30 #include <asoundlib.h>
31
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/volume.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core.h>
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/thread-mq.h>
50 #include <pulsecore/rtpoll.h>
51 #include <pulsecore/time-smoother.h>
52
53 #include <modules/reserve-wrap.h>
54
55 #include "alsa-util.h"
56 #include "alsa-source.h"
57
58 /* #define DEBUG_TIMING */
59
60 #define DEFAULT_DEVICE "default"
61
62 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
63 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
64
65 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
66 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms */
67 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s */
68 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms */
69 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
70 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
71
72 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
73 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
74
75 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s */
76 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s */
77
78 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms */
79 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms */
80
81 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
82
/* Per-instance state for an ALSA capture source. Created by the module's
 * init code and shared between the main thread and the source's IO thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_source *source;

    /* IO thread plus its message queue and poll loop */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle; /* capture PCM handle; NULL while suspended (see suspend()/unsuspend()) */

    /* Hardware mixer (volume/mute) plumbing */
    char *paths_dir;
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume; /* last volume read from the hardware mixer */

    unsigned int *rates; /* NOTE(review): list of supported sample rates; termination convention not visible here -- confirm with callers */

    /* All of the following sizes are in bytes */
    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        tsched_watermark_ref,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold;

    pa_usec_t watermark_dec_not_before; /* hold-off deadline before the watermark may be lowered; 0 = not armed */
    pa_usec_t min_latency_ref;          /* min latency recorded at creation time, reused on unsuspend (see reset_watermark()) */
    pa_usec_t tsched_watermark_usec;    /* tsched_watermark converted to usec (kept in sync by fix_tsched_watermark()) */

    char *device_name;  /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    pa_bool_t first; /* set to TRUE on unsuspend and after stream recovery (see try_recover()) */

    pa_rtpoll_item *alsa_rtpoll_item;

    /* Time smoother used to interpolate the capture latency */
    pa_smoother *smoother;
    uint64_t read_count; /* total number of bytes read from the device */
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    /* Device reservation handling */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;

    /* ucm context */
    pa_alsa_ucm_mapping_context *ucm_context;
};
145
146 static void userdata_free(struct userdata *u);
147
148 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
149 pa_assert(r);
150 pa_assert(u);
151
152 pa_log_debug("Suspending source %s, because another application requested us to release the device.", u->source->name);
153
154 if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
155 return PA_HOOK_CANCEL;
156
157 return PA_HOOK_OK;
158 }
159
160 static void reserve_done(struct userdata *u) {
161 pa_assert(u);
162
163 if (u->reserve_slot) {
164 pa_hook_slot_free(u->reserve_slot);
165 u->reserve_slot = NULL;
166 }
167
168 if (u->reserve) {
169 pa_reserve_wrapper_unref(u->reserve);
170 u->reserve = NULL;
171 }
172 }
173
174 static void reserve_update(struct userdata *u) {
175 const char *description;
176 pa_assert(u);
177
178 if (!u->source || !u->reserve)
179 return;
180
181 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
182 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
183 }
184
185 static int reserve_init(struct userdata *u, const char *dname) {
186 char *rname;
187
188 pa_assert(u);
189 pa_assert(dname);
190
191 if (u->reserve)
192 return 0;
193
194 if (pa_in_system_mode())
195 return 0;
196
197 if (!(rname = pa_alsa_get_reserve_name(dname)))
198 return 0;
199
200 /* We are resuming, try to lock the device */
201 u->reserve = pa_reserve_wrapper_get(u->core, rname);
202 pa_xfree(rname);
203
204 if (!(u->reserve))
205 return -1;
206
207 reserve_update(u);
208
209 pa_assert(!u->reserve_slot);
210 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
211
212 return 0;
213 }
214
215 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
216 pa_assert(w);
217 pa_assert(u);
218
219 if (PA_PTR_TO_UINT(busy) && !u->reserve) {
220 pa_log_debug("Suspending source %s, because another application is blocking the access to the device.", u->source->name);
221 pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION);
222 } else {
223 pa_log_debug("Resuming source %s, because other applications aren't blocking access to the device any more.", u->source->name);
224 pa_source_suspend(u->source, false, PA_SUSPEND_APPLICATION);
225 }
226
227 return PA_HOOK_OK;
228 }
229
230 static void monitor_done(struct userdata *u) {
231 pa_assert(u);
232
233 if (u->monitor_slot) {
234 pa_hook_slot_free(u->monitor_slot);
235 u->monitor_slot = NULL;
236 }
237
238 if (u->monitor) {
239 pa_reserve_monitor_wrapper_unref(u->monitor);
240 u->monitor = NULL;
241 }
242 }
243
244 static int reserve_monitor_init(struct userdata *u, const char *dname) {
245 char *rname;
246
247 pa_assert(u);
248 pa_assert(dname);
249
250 if (pa_in_system_mode())
251 return 0;
252
253 if (!(rname = pa_alsa_get_reserve_name(dname)))
254 return 0;
255
256 /* We are resuming, try to lock the device */
257 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
258 pa_xfree(rname);
259
260 if (!(u->monitor))
261 return -1;
262
263 pa_assert(!u->monitor_slot);
264 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
265
266 return 0;
267 }
268
269 static void fix_min_sleep_wakeup(struct userdata *u) {
270 size_t max_use, max_use_2;
271
272 pa_assert(u);
273 pa_assert(u->use_tsched);
274
275 max_use = u->hwbuf_size - u->hwbuf_unused;
276 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
277
278 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
279 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
280
281 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
282 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
283 }
284
285 static void fix_tsched_watermark(struct userdata *u) {
286 size_t max_use;
287 pa_assert(u);
288 pa_assert(u->use_tsched);
289
290 max_use = u->hwbuf_size - u->hwbuf_unused;
291
292 if (u->tsched_watermark > max_use - u->min_sleep)
293 u->tsched_watermark = max_use - u->min_sleep;
294
295 if (u->tsched_watermark < u->min_wakeup)
296 u->tsched_watermark = u->min_wakeup;
297
298 u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
299 }
300
/* React to a (near-)overrun: raise the wakeup watermark so we get woken
 * earlier, and if the watermark is already maxed out, raise the minimal
 * latency instead (unless a fixed latency range was configured). */
static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    /* Double it, but grow by no more than watermark_inc_step at a time */
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's
       raise the latency unless doing so was disabled in
       configuration */
    if (u->fixed_latency_range)
        return;

    old_min_latency = u->source->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
    }

    /* If we reach this point neither the watermark nor the latency can be
     * raised any further -- there is nothing left we can do about dropouts. */
}
338
/* Lower the wakeup watermark again after a stretch of trouble-free
 * operation. Rate-limited through watermark_dec_not_before so we only
 * decrease after TSCHED_WATERMARK_VERIFY_AFTER_USEC of quiet. */
static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    /* pa_usec_t is unsigned, so this effectively tests for 0, i.e.
     * "hold-off timer not armed yet": arm it and try again later */
    if (u->watermark_dec_not_before <= 0)
        goto restart;

    /* Hold-off period has not elapsed yet */
    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    /* Halve the watermark, but shrink by no more than watermark_dec_step */
    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);

    /* We don't change the latency range */

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}
372
373 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
374 pa_usec_t wm, usec;
375
376 pa_assert(sleep_usec);
377 pa_assert(process_usec);
378
379 pa_assert(u);
380 pa_assert(u->use_tsched);
381
382 usec = pa_source_get_requested_latency_within_thread(u->source);
383
384 if (usec == (pa_usec_t) -1)
385 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
386
387 wm = u->tsched_watermark_usec;
388
389 if (wm > usec)
390 wm = usec/2;
391
392 *sleep_usec = usec - wm;
393 *process_usec = wm;
394
395 #ifdef DEBUG_TIMING
396 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
397 (unsigned long) (usec / PA_USEC_PER_MSEC),
398 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
399 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
400 #endif
401 }
402
403 static int try_recover(struct userdata *u, const char *call, int err) {
404 pa_assert(u);
405 pa_assert(call);
406 pa_assert(err < 0);
407
408 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
409
410 pa_assert(err != -EAGAIN);
411
412 if (err == -EPIPE)
413 pa_log_debug("%s: Buffer overrun!", call);
414
415 if (err == -ESTRPIPE)
416 pa_log_debug("%s: System suspended!", call);
417
418 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
419 pa_log("%s: %s", call, pa_alsa_strerror(err));
420 return -1;
421 }
422
423 u->first = TRUE;
424 return 0;
425 }
426
/* Given that n_bytes are currently pending in the hardware buffer,
 * compute how many bytes of room remain before an overrun, and adapt the
 * timer-scheduling watermark accordingly (grow it when we got close to
 * an overrun, shrink it after sustained headroom). Returns the remaining
 * room in bytes. */
static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_record;
    size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
    pa_bool_t overrun = FALSE;

    /* We use <= instead of < for this check here because an overrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= rec_space)
        left_to_record = rec_space - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_record = 0;
        overrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (pa_log_ratelimit(PA_LOG_INFO))
            pa_log_info("Overrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        /* Too little headroom (or a real overrun): wake up earlier next time */
        if (overrun || left_to_record < u->watermark_inc_threshold)
            increase_watermark(u);
        else if (left_to_record > u->watermark_dec_threshold) {
            reset_not_before = FALSE;

            /* We decrease the watermark only if have actually
             * been woken up by a timeout. If something else woke
             * us up it's too easy to fulfill the deadlines... */

            if (on_timeout)
                decrease_watermark(u);
        }

        /* Anything between the two thresholds re-arms the decrease hold-off */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_record;
}
479
/* Read as much pending capture data as possible using the mmap transfer
 * path and post it to the source. 'polled' indicates we were woken by
 * POLLIN rather than a timer; 'on_timeout' that a watermark timeout
 * fired (it enables watermark decrease, and only for the first
 * check_left_to_record() call). On return *sleep_usec holds how long the
 * IO thread may sleep (tsched mode only). Returns 1 if any data was
 * posted, 0 if not, or a negative value on unrecoverable error. */
static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* How many frames does the device have for us? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        /* In tsched mode, skip reading when there is still plenty of room
         * in the hw buffer and we were not explicitly woken by poll */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not reading, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN fired but there is nothing to read: driver bug */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not reading, because not necessary.");
#endif
            break;
        }

        /* Bound the number of outer iterations per wakeup */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Reading");
#endif

        /* Inner loop: drain the n_bytes reported above in mmap chunks */
        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to read", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

                /* After the first successful iteration -EAGAIN just
                 * means "no more data right now" */
                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the mmap'ed region in a fixed memblock (no copy) and
             * hand it to the core; unref before committing back to ALSA */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the hw buffer is expected to reach the watermark */
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
641
/* Read pending capture data with plain snd_pcm_readi() (non-mmap
 * fallback path) and post it to the source. Semantics of 'polled',
 * 'on_timeout', *sleep_usec and the return value mirror mmap_read(). */
static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    int work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* How many frames does the device have for us? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        /* In tsched mode, skip reading while there is still enough room */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN fired but there is nothing to read: driver bug */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Bound the number of outer iterations per wakeup */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

        /* Inner loop: drain the n_bytes reported above in readi chunks */
        for (;;) {
            void *p;
            snd_pcm_sframes_t frames;
            pa_memchunk chunk;

            /* Allocate a default-sized block and cap the read at what
             * fits into it and at what the device reported available */
            chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);

            frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            /* pa_log_debug("%lu frames to read", (unsigned long) n); */

            p = pa_memblock_acquire(chunk.memblock);
            frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
            pa_memblock_release(chunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {
                pa_memblock_unref(chunk.memblock);

                /* After the first successful iteration -EAGAIN just
                 * means "no more data right now" */
                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0) {
                pa_memblock_unref(chunk.memblock);
                break;
            }

            pa_assert(frames > 0);
            after_avail = FALSE;

            chunk.index = 0;
            chunk.length = (size_t) frames * u->frame_size;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref(chunk.memblock);

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

            /* pa_log_debug("read %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the hw buffer is expected to reach the watermark */
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
771
/* Feed a fresh (system time, stream time) pair into the time smoother
 * that source_get_latency() interpolates from. Updates are rate-limited
 * by smoother_interval, which grows exponentially up to
 * SMOOTHER_MAX_INTERVAL. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    uint64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;
    snd_htimestamp_t htstamp = { 0, 0 };

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
        pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
        return;
    }

    snd_pcm_status_get_htstamp(status, &htstamp);
    now1 = pa_timespec_load(&htstamp);

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Stream position: everything read so far plus what still sits in the
     * hardware buffer */
    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
    now2 = pa_bytes_to_usec(position, &u->source->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
813
814 static pa_usec_t source_get_latency(struct userdata *u) {
815 int64_t delay;
816 pa_usec_t now1, now2;
817
818 pa_assert(u);
819
820 now1 = pa_rtclock_now();
821 now2 = pa_smoother_get(u->smoother, now1);
822
823 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
824
825 return delay >= 0 ? (pa_usec_t) delay : 0;
826 }
827
828 static int build_pollfd(struct userdata *u) {
829 pa_assert(u);
830 pa_assert(u->pcm_handle);
831
832 if (u->alsa_rtpoll_item)
833 pa_rtpoll_item_free(u->alsa_rtpoll_item);
834
835 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
836 return -1;
837
838 return 0;
839 }
840
841 /* Called from IO context */
842 static int suspend(struct userdata *u) {
843 pa_assert(u);
844 pa_assert(u->pcm_handle);
845
846 pa_smoother_pause(u->smoother, pa_rtclock_now());
847
848 /* Let's suspend */
849 snd_pcm_close(u->pcm_handle);
850 u->pcm_handle = NULL;
851
852 if (u->alsa_rtpoll_item) {
853 pa_rtpoll_item_free(u->alsa_rtpoll_item);
854 u->alsa_rtpoll_item = NULL;
855 }
856
857 pa_log_info("Device suspended...");
858
859 return 0;
860 }
861
/* Called from IO context: derive hwbuf_unused and avail_min from the
 * currently requested latency and push the resulting software parameters
 * to ALSA. Returns 0 on success or a negative ALSA error. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->source->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency is left unused */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        /* hwbuf_unused changed, so the derived limits must be recomputed */
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    avail_min = 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* In tsched mode only wake up once enough data for a full sleep
         * period has accumulated */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    return 0;
}
914
/* Called from IO Context on unsuspend or from main thread when creating source.
 * Re-derives the watermark and its step/threshold values in the source's
 * own sample spec ('ss' is the spec the watermark was originally sized
 * in) and re-establishes the source's latency range. */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            pa_bool_t in_thread) {
    /* Convert the byte count from spec 'ss' into the source's spec,
     * rounding up in both conversions */
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
                                                    &u->source->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_source_set_latency_range_within_thread(u->source,
                                                  u->min_latency_ref,
                                                  pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work-around assert in pa_source_set_latency_within_thread,
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->source->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
}
948
/* Called from IO context: reopen the PCM after a suspend and verify that
 * the exact same configuration (access mode, sample spec, fragment and
 * buffer sizes) can be restored. Returns 0 on success, -PA_ERR_IO on
 * any failure (the PCM is closed again in that case). */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Ask for exactly the configuration we had before the suspend */
    ss = u->source->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    /* The call above may have negotiated different values; reject any change */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Restart latency tracking from scratch */
    u->read_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;

    /* reset the watermark to the value defined when source was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, TRUE);

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
1030
/* Called from IO context: message handler for the source. Handles
 * latency queries and state transitions (suspend/resume) locally and
 * delegates everything else to the generic pa_source handler. */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended the PCM is closed; report zero latency then */
            if (u->pcm_handle)
                r = source_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_STATE:

            switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SOURCE_SUSPENDED: {
                    int r;

                    pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SOURCE_IDLE:
                case PA_SOURCE_RUNNING: {
                    int r;

                    /* First transition out of INIT: set up polling */
                    if (u->source->thread_info.state == PA_SOURCE_INIT) {
                        if (build_pollfd(u) < 0)
                            return -PA_ERR_IO;
                    }

                    /* Coming back from suspend: reopen the device */
                    if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
                        if ((r = unsuspend(u)) < 0)
                            return r;
                    }

                    break;
                }

                case PA_SOURCE_UNLINKED:
                case PA_SOURCE_INIT:
                case PA_SOURCE_INVALID_STATE:
                    /* Nothing to do for these states */
                    ;
            }

            break;
    }

    /* Fall through to the generic source message handler */
    return pa_source_process_msg(o, code, data, offset, chunk);
}
1091
1092 /* Called from main context */
1093 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1094 pa_source_state_t old_state;
1095 struct userdata *u;
1096
1097 pa_source_assert_ref(s);
1098 pa_assert_se(u = s->userdata);
1099
1100 old_state = pa_source_get_state(u->source);
1101
1102 if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1103 reserve_done(u);
1104 else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1105 if (reserve_init(u, u->device_name) < 0)
1106 return -PA_ERR_BUSY;
1107
1108 return 0;
1109 }
1110
1111 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1112 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1113
1114 pa_assert(u);
1115 pa_assert(u->mixer_handle);
1116
1117 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1118 return 0;
1119
1120 if (!PA_SOURCE_IS_LINKED(u->source->state))
1121 return 0;
1122
1123 if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1124 pa_source_set_mixer_dirty(u->source, TRUE);
1125 return 0;
1126 }
1127
1128 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1129 pa_source_get_volume(u->source, TRUE);
1130 pa_source_get_mute(u->source, TRUE);
1131 }
1132
1133 return 0;
1134 }
1135
1136 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1137 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1138
1139 pa_assert(u);
1140 pa_assert(u->mixer_handle);
1141
1142 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1143 return 0;
1144
1145 if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1146 pa_source_set_mixer_dirty(u->source, TRUE);
1147 return 0;
1148 }
1149
1150 if (mask & SND_CTL_EVENT_MASK_VALUE)
1151 pa_source_update_volume_and_mute(u->source);
1152
1153 return 0;
1154 }
1155
/* Read the current hardware volume from the mixer path and propagate it to
 * the source. If the hardware volume changed since the last read, the
 * software volume is reset (dB case only). */
static void source_get_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* On read failure keep the previously known volume */
    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

    if (u->mixer_path->has_dB) {
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
    }

    /* Unchanged since last read: nothing to propagate */
    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_source_set_soft_volume(s, NULL);
}
1188
/* Write the requested volume (s->real_volume) to hardware. In the dB case
 * the difference between what was requested and what the hardware actually
 * accepted is compensated in software (s->soft_volume), unless the
 * difference is below VOLUME_ACCURACY. */
static void source_set_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    pa_bool_t deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    /* With deferred volume the actual hardware write happens later from the
     * IO thread (source_write_volume_cb); here we only compute the rounding */
    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1246
/* Deferred-volume write path: called from the IO thread to flush the
 * pending hardware volume (s->thread_info.current_hw_volume) to the mixer.
 * Only set when PA_SOURCE_DEFERRED_VOLUME is in effect. */
static void source_write_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        /* Compare what we wanted against what the hardware accepted; small
         * rounding differences (within VOLUME_ACCURACY) are silently ignored */
        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            /* A union is enough here: the %-string and dB-string buffers are
             * never needed at the same time */
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug(" in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}
1288
1289 static void source_get_mute_cb(pa_source *s) {
1290 struct userdata *u = s->userdata;
1291 pa_bool_t b;
1292
1293 pa_assert(u);
1294 pa_assert(u->mixer_path);
1295 pa_assert(u->mixer_handle);
1296
1297 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1298 return;
1299
1300 s->muted = b;
1301 }
1302
1303 static void source_set_mute_cb(pa_source *s) {
1304 struct userdata *u = s->userdata;
1305
1306 pa_assert(u);
1307 pa_assert(u->mixer_path);
1308 pa_assert(u->mixer_handle);
1309
1310 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1311 }
1312
1313 static void mixer_volume_init(struct userdata *u) {
1314 pa_assert(u);
1315
1316 if (!u->mixer_path->has_volume) {
1317 pa_source_set_write_volume_callback(u->source, NULL);
1318 pa_source_set_get_volume_callback(u->source, NULL);
1319 pa_source_set_set_volume_callback(u->source, NULL);
1320
1321 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1322 } else {
1323 pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1324 pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1325
1326 if (u->mixer_path->has_dB && u->deferred_volume) {
1327 pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
1328 pa_log_info("Successfully enabled deferred volume.");
1329 } else
1330 pa_source_set_write_volume_callback(u->source, NULL);
1331
1332 if (u->mixer_path->has_dB) {
1333 pa_source_enable_decibel_volume(u->source, TRUE);
1334 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1335
1336 u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1337 u->source->n_volume_steps = PA_VOLUME_NORM+1;
1338
1339 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1340 } else {
1341 pa_source_enable_decibel_volume(u->source, FALSE);
1342 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1343
1344 u->source->base_volume = PA_VOLUME_NORM;
1345 u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1346 }
1347
1348 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1349 }
1350
1351 if (!u->mixer_path->has_mute) {
1352 pa_source_set_get_mute_callback(u->source, NULL);
1353 pa_source_set_set_mute_callback(u->source, NULL);
1354 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1355 } else {
1356 pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1357 pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1358 pa_log_info("Using hardware mute control.");
1359 }
1360 }
1361
1362 static int source_set_port_ucm_cb(pa_source *s, pa_device_port *p) {
1363 struct userdata *u = s->userdata;
1364
1365 pa_assert(u);
1366 pa_assert(p);
1367 pa_assert(u->ucm_context);
1368
1369 return pa_alsa_ucm_set_port(u->ucm_context, p, FALSE);
1370 }
1371
1372 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1373 struct userdata *u = s->userdata;
1374 pa_alsa_port_data *data;
1375
1376 pa_assert(u);
1377 pa_assert(p);
1378 pa_assert(u->mixer_handle);
1379
1380 data = PA_DEVICE_PORT_DATA(p);
1381
1382 pa_assert_se(u->mixer_path = data->path);
1383 pa_alsa_path_select(u->mixer_path, data->setting, u->mixer_handle, s->muted);
1384
1385 mixer_volume_init(u);
1386
1387 if (s->set_mute)
1388 s->set_mute(s);
1389 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
1390 if (s->write_volume)
1391 s->write_volume(s);
1392 } else {
1393 if (s->set_volume)
1394 s->set_volume(s);
1395 }
1396
1397 return 0;
1398 }
1399
1400 static void source_update_requested_latency_cb(pa_source *s) {
1401 struct userdata *u = s->userdata;
1402 pa_assert(u);
1403 pa_assert(u->use_tsched); /* only when timer scheduling is used
1404 * we can dynamically adjust the
1405 * latency */
1406
1407 if (!u->pcm_handle)
1408 return;
1409
1410 update_sw_params(u);
1411 }
1412
1413 static pa_bool_t source_update_rate_cb(pa_source *s, uint32_t rate) {
1414 struct userdata *u = s->userdata;
1415 int i;
1416 pa_bool_t supported = FALSE;
1417
1418 pa_assert(u);
1419
1420 for (i = 0; u->rates[i]; i++) {
1421 if (u->rates[i] == rate) {
1422 supported = TRUE;
1423 break;
1424 }
1425 }
1426
1427 if (!supported) {
1428 pa_log_info("Source does not support sample rate of %d Hz", rate);
1429 return FALSE;
1430 }
1431
1432 if (!PA_SOURCE_IS_OPENED(s->state)) {
1433 pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
1434 u->source->sample_spec.rate = rate;
1435 return TRUE;
1436 }
1437
1438 return FALSE;
1439 }
1440
/* IO thread main loop: reads captured data from the PCM device, keeps the
 * timing smoother updated, schedules the next wakeup (timer-based or
 * poll-based), and recovers from PCM errors. Exits via `finish` on a clean
 * shutdown request, or via `fail` on an unrecoverable error (which asks the
 * core to unload the module). */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0, real_sleep;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Read some data and pass it to the sources */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            /* First iteration (or after error recovery): kick off capture */
            if (u->first) {
                pa_log_info("Starting capture.");
                snd_pcm_start(u->pcm_handle);

                pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

                u->first = FALSE;
            }

            if (u->use_mmap)
                work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
            else
                work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done)
                update_smoother(u);

            if (u->use_tsched) {
                pa_usec_t cusec;

                /* OK, the capture buffer is now empty, let's
                 * calculate when to wake up next */

                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }
        }

        /* With deferred volume, possibly wake up earlier so pending
         * hardware volume changes get applied on time */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_source_volume_change_apply(u->source, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0) {
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
            /* Remember when we went to sleep so actual vs. requested sleep
             * time can be compared after pa_rtpoll_run() */
            real_sleep = pa_rtclock_now();
        }
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        if (rtpoll_sleep > 0) {
            real_sleep = pa_rtclock_now() - real_sleep;
#ifdef DEBUG_TIMING
            pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
                (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
                (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
#endif
            /* Warn if the scheduler woke us up much later than requested */
            if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
                pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
                    (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
                    (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
        }

        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
            pa_source_volume_change_apply(u->source, NULL);

        /* ret == 0 means the rtpoll was asked to quit */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            if (revents & ~POLLIN) {
                /* Error/hangup on the PCM fd: try to recover and restart
                 * capture from scratch on the next iteration */
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1585
1586 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1587 const char *n;
1588 char *t;
1589
1590 pa_assert(data);
1591 pa_assert(ma);
1592 pa_assert(device_name);
1593
1594 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1595 pa_source_new_data_set_name(data, n);
1596 data->namereg_fail = TRUE;
1597 return;
1598 }
1599
1600 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1601 data->namereg_fail = TRUE;
1602 else {
1603 n = device_id ? device_id : device_name;
1604 data->namereg_fail = FALSE;
1605 }
1606
1607 if (mapping)
1608 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1609 else
1610 t = pa_sprintf_malloc("alsa_input.%s", n);
1611
1612 pa_source_new_data_set_name(data, t);
1613 pa_xfree(t);
1614 }
1615
/* Locate and probe a mixer for the PCM device. On success u->mixer_handle
 * is set together with either u->mixer_path (explicit element= argument)
 * or u->mixer_path_set (taken from the mapping). On failure everything is
 * cleaned up and left NULL, so callers can just test u->mixer_handle. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
    snd_hctl_t *hctl;

    /* Without a mapping or an explicit element there is nothing to probe */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* An explicit mixer element was requested: build a one-element path */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else if (!(u->mixer_path_set = mapping->input_path_set))
        goto fail;

    return;

fail:

    if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1654
/* Activate the appropriate mixer path (from the active port, or the first/
 * only available path), install volume/mute callbacks via
 * mixer_volume_init(), and register mixer event monitoring. Returns 0 on
 * success (including "no mixer at all"), negative on setup failure. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_bool_t need_mixer_callback = FALSE;

    pa_assert(u);

    /* No mixer found earlier: software volume/mute only, nothing to do */
    if (!u->mixer_handle)
        return 0;

    if (u->source->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->source->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->source->muted);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->source->muted);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;
        void *state;

        PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = TRUE;
        }
    }
    else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        /* With deferred volume the mixer is polled from the IO thread's
         * rtpoll; otherwise from the main loop via an fdlist */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}
1730
/* Construct a new ALSA capture source from the given module arguments.
 * Parses all modargs, opens the PCM device (via mapping, device_id or
 * device string), probes the mixer, creates the pa_source object, starts
 * the IO thread and finally publishes the source with pa_source_put().
 * Returns the new source, or NULL on any failure (all partially-created
 * state is torn down via userdata_free()). */
pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL, *key, *mod_name;
    pa_sample_spec ss;
    char *thread_name = NULL;
    uint32_t alternate_sample_rate;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, fixed_latency_range = FALSE;
    pa_source_new_data data;
    pa_alsa_profile_set *profile_set = NULL;
    void *state = NULL;

    pa_assert(m);
    pa_assert(ma);

    /* --- Parse sample spec, channel map and rate arguments --- */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    alternate_sample_rate = m->core->alternate_sample_rate;
    if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
        pa_log("Failed to parse alternate sample rate");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    /* --- Parse buffer metrics (fragment and tsched sizes) --- */
    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    /* --- Parse the boolean feature switches --- */
    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    deferred_volume = m->core->deferred_volume;
    if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
        pa_log("Failed to parse deferred_volume argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
        pa_log("Failed to parse fixed_latency_range argument.");
        goto fail;
    }

    use_tsched = pa_alsa_may_tsched(use_tsched);

    /* --- Create and initialize the userdata structure --- */
    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->deferred_volume = deferred_volume;
    u->fixed_latency_range = fixed_latency_range;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    /* use ucm */
    if (mapping && mapping->ucm_context.ucm)
        u->ucm_context = &mapping->ucm_context;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));

    /* --- Acquire the device reservation before opening it --- */
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

    /* --- Open the PCM device; three strategies depending on arguments --- */
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
            if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
                pa_log("Failed to enable ucm modifier %s", mod_name);
            else
                pa_log_debug("Enabled ucm modifier %s", mod_name);
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    /* b/d report what the open actually managed; downgrade if needed */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched) {
        pa_log_info("Successfully enabled timer-based scheduling mode.");
        if (u->fixed_latency_range)
            pa_log_info("Disabling latency range changes on overrun");
    }

    u->rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
    if (!u->rates) {
        pa_log_error("Failed to find any supported sample rates.");
        goto fail;
    }

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    if (!u->ucm_context)
        find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    /* --- Fill in the pa_source_new_data and create the source object --- */
    pa_source_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_source_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse namereg_fail argument.");
        pa_source_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_source_new_data_set_sample_spec(&data, &ss);
    pa_source_new_data_set_channel_map(&data, &map);
    pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);

        while ((key = pa_proplist_iterate(mapping->proplist, &state)))
            pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_source_new_data_done(&data);
        goto fail;
    }

    if (u->ucm_context)
        pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, FALSE, card);
    else if (u->mixer_path_set)
        pa_alsa_add_ports(&data, u->mixer_path_set, card);

    u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
    pa_source_new_data_done(&data);

    if (!u->source) {
        pa_log("Failed to create source object");
        goto fail;
    }

    if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
                                 &u->source->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse deferred_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
                                 &u->source->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse deferred_volume_extra_delay parameter");
        goto fail;
    }

    /* --- Wire up the source callbacks --- */
    u->source->parent.process_msg = source_process_msg;
    if (u->use_tsched)
        u->source->update_requested_latency = source_update_requested_latency_cb;
    u->source->set_state = source_set_state_cb;
    if (u->ucm_context)
        u->source->set_port = source_set_port_ucm_cb;
    else
        u->source->set_port = source_set_port_cb;
    if (u->source->alternate_sample_rate)
        u->source->update_rate = source_update_rate_cb;
    u->source->userdata = u;

    pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
    pa_source_set_rtpoll(u->source, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    if (u->use_tsched) {
        u->tsched_watermark_ref = tsched_watermark;
        reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
    }
    else
        pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    /* --- Set up mixer (or UCM port) and start the IO thread --- */
    if (u->ucm_context) {
        if (u->source->active_port && pa_alsa_ucm_set_port(u->ucm_context, u->source->active_port, FALSE) < 0)
            goto fail;
    } else if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    thread_name = pa_sprintf_malloc("alsa-source-%s", pa_strnull(pa_proplist_gets(u->source->proplist, "alsa.id")));
    if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }
    pa_xfree(thread_name);
    thread_name = NULL;

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->source->set_volume)
            u->source->set_volume(u->source);
    } else {
        if (u->source->get_volume)
            u->source->get_volume(u->source);
    }

    if (data.muted_is_set) {
        if (u->source->set_mute)
            u->source->set_mute(u->source);
    } else {
        if (u->source->get_mute)
            u->source->get_mute(u->source);
    }

    if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
        u->source->write_volume(u->source);

    pa_source_put(u->source);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->source;

fail:
    pa_xfree(thread_name);

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
2110
2111 static void userdata_free(struct userdata *u) {
2112 pa_assert(u);
2113
2114 if (u->source)
2115 pa_source_unlink(u->source);
2116
2117 if (u->thread) {
2118 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2119 pa_thread_free(u->thread);
2120 }
2121
2122 pa_thread_mq_done(&u->thread_mq);
2123
2124 if (u->source)
2125 pa_source_unref(u->source);
2126
2127 if (u->mixer_pd)
2128 pa_alsa_mixer_pdata_free(u->mixer_pd);
2129
2130 if (u->alsa_rtpoll_item)
2131 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2132
2133 if (u->rtpoll)
2134 pa_rtpoll_free(u->rtpoll);
2135
2136 if (u->pcm_handle) {
2137 snd_pcm_drop(u->pcm_handle);
2138 snd_pcm_close(u->pcm_handle);
2139 }
2140
2141 if (u->mixer_fdl)
2142 pa_alsa_fdlist_free(u->mixer_fdl);
2143
2144 if (u->mixer_path && !u->mixer_path_set)
2145 pa_alsa_path_free(u->mixer_path);
2146
2147 if (u->mixer_handle)
2148 snd_mixer_close(u->mixer_handle);
2149
2150 if (u->smoother)
2151 pa_smoother_free(u->smoother);
2152
2153 if (u->rates)
2154 pa_xfree(u->rates);
2155
2156 reserve_done(u);
2157 monitor_done(u);
2158
2159 pa_xfree(u->device_name);
2160 pa_xfree(u->control_device);
2161 pa_xfree(u->paths_dir);
2162 pa_xfree(u);
2163 }
2164
/* Public destructor entry point for an ALSA source: validates the
 * source object, fetches its private userdata and delegates the whole
 * teardown to userdata_free(). */
void pa_alsa_source_free(pa_source *s) {
    struct userdata *u;

    pa_source_assert_ref(s);
    /* pa_assert_se() evaluates its expression even when assertions are
     * compiled out, so the assignment to u always happens. */
    pa_assert_se(u = s->userdata);

    userdata_free(u);
}