1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/i18n.h>
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core-error.h>
38 #include <pulsecore/core.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/time-smoother.h>
53
54 #include <modules/reserve-wrap.h>
55
56 #include "alsa-util.h"
57 #include "alsa-source.h"
58
59 /* #define DEBUG_TIMING */
60
61 #define DEFAULT_DEVICE "default"
62 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
63 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
64 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
65 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms */
66 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms */
67
68 struct userdata {
69 pa_core *core;
70 pa_module *module;
71 pa_source *source;
72
73 pa_thread *thread;
74 pa_thread_mq thread_mq;
75 pa_rtpoll *rtpoll;
76
77 snd_pcm_t *pcm_handle;
78
79 pa_alsa_fdlist *mixer_fdl;
80 snd_mixer_t *mixer_handle;
81 pa_alsa_path_set *mixer_path_set;
82 pa_alsa_path *mixer_path;
83
84 pa_cvolume hardware_volume;
85
86 size_t
87 frame_size,
88 fragment_size,
89 hwbuf_size,
90 tsched_watermark,
91 hwbuf_unused,
92 min_sleep,
93 min_wakeup,
94 watermark_step;
95
96 unsigned nfragments;
97
98 char *device_name;
99 char *control_device;
100
101 pa_bool_t use_mmap:1, use_tsched:1;
102
103 pa_rtpoll_item *alsa_rtpoll_item;
104
105 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
106
107 pa_smoother *smoother;
108 uint64_t read_count;
109
110 pa_reserve_wrapper *reserve;
111 pa_hook_slot *reserve_slot;
112 pa_reserve_monitor_wrapper *monitor;
113 pa_hook_slot *monitor_slot;
114 };
115
116 static void userdata_free(struct userdata *u);
117
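/* Device reservation hook: another application asked us to give up the
 * audio device, so suspend the source (which closes the PCM); cancel the
 * hand-over if suspending fails. */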
118 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
119 pa_assert(r);
120 pa_assert(u);
121
122 if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
123 return PA_HOOK_CANCEL;
124
125 return PA_HOOK_OK;
126 }
127
128 static void reserve_done(struct userdata *u) {
129 pa_assert(u);
130
131 if (u->reserve_slot) {
132 pa_hook_slot_free(u->reserve_slot);
133 u->reserve_slot = NULL;
134 }
135
136 if (u->reserve) {
137 pa_reserve_wrapper_unref(u->reserve);
138 u->reserve = NULL;
139 }
140 }
141
142 static void reserve_update(struct userdata *u) {
143 const char *description;
144 pa_assert(u);
145
146 if (!u->source || !u->reserve)
147 return;
148
149 if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
150 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
151 }
152
153 static int reserve_init(struct userdata *u, const char *dname) {
154 char *rname;
155
156 pa_assert(u);
157 pa_assert(dname);
158
159 if (u->reserve)
160 return 0;
161
162 if (pa_in_system_mode())
163 return 0;
164
165     /* Try to lock the device via the reservation mechanism */
166 if (!(rname = pa_alsa_get_reserve_name(dname)))
167 return 0;
168
169 u->reserve = pa_reserve_wrapper_get(u->core, rname);
170 pa_xfree(rname);
171
172 if (!(u->reserve))
173 return -1;
174
175 reserve_update(u);
176
177 pa_assert(!u->reserve_slot);
178 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
179
180 return 0;
181 }
182
183 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
184 pa_bool_t b;
185
186 pa_assert(w);
187 pa_assert(u);
188
189 b = PA_PTR_TO_UINT(busy) && !u->reserve;
190
191 pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
192 return PA_HOOK_OK;
193 }
194
195 static void monitor_done(struct userdata *u) {
196 pa_assert(u);
197
198 if (u->monitor_slot) {
199 pa_hook_slot_free(u->monitor_slot);
200 u->monitor_slot = NULL;
201 }
202
203 if (u->monitor) {
204 pa_reserve_monitor_wrapper_unref(u->monitor);
205 u->monitor = NULL;
206 }
207 }
208
209 static int reserve_monitor_init(struct userdata *u, const char *dname) {
210 char *rname;
211
212 pa_assert(u);
213 pa_assert(dname);
214
215 if (pa_in_system_mode())
216 return 0;
217
218     /* Watch whether somebody else holds the device reservation */
219 if (!(rname = pa_alsa_get_reserve_name(dname)))
220 return 0;
221
222 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
223 pa_xfree(rname);
224
225 if (!(u->monitor))
226 return -1;
227
228 pa_assert(!u->monitor_slot);
229 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
230
231 return 0;
232 }
233
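/* Clamp the minimum sleep and wakeup times used for timer-based scheduling
 * to at least one frame and at most half of the usable hardware buffer. */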
234 static void fix_min_sleep_wakeup(struct userdata *u) {
235 size_t max_use, max_use_2;
236 pa_assert(u);
237
238 max_use = u->hwbuf_size - u->hwbuf_unused;
239 max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
240
241 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
242 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
243
244 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
245 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
246 }
247
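/* Keep the wakeup watermark within the usable buffer: leave room for
 * min_sleep and never drop below min_wakeup. */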
248 static void fix_tsched_watermark(struct userdata *u) {
249 size_t max_use;
250 pa_assert(u);
251
252 max_use = u->hwbuf_size - u->hwbuf_unused;
253
254 if (u->tsched_watermark > max_use - u->min_sleep)
255 u->tsched_watermark = max_use - u->min_sleep;
256
257 if (u->tsched_watermark < u->min_wakeup)
258 u->tsched_watermark = u->min_wakeup;
259 }
260
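/* Called after an overrun when timer-based scheduling is used: first try to
 * enlarge the wakeup watermark; if that is already maxed out, raise the
 * source's minimum latency instead. */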
261 static void adjust_after_overrun(struct userdata *u) {
262 size_t old_watermark;
263 pa_usec_t old_min_latency, new_min_latency;
264
265 pa_assert(u);
266 pa_assert(u->use_tsched);
267
268 /* First, just try to increase the watermark */
269 old_watermark = u->tsched_watermark;
270 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
271
272 fix_tsched_watermark(u);
273
274 if (old_watermark != u->tsched_watermark) {
275 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
276 (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
277 return;
278 }
279
280 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
281 old_min_latency = u->source->thread_info.min_latency;
282 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
283 new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
284
285 if (old_min_latency != new_min_latency) {
286 pa_log_notice("Increasing minimal latency to %0.2f ms",
287 (double) new_min_latency / PA_USEC_PER_MSEC);
288
289 pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
290 return;
291 }
292
293     /* If we reach this point we're officially out of options. */
294 }
295
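/* Split the requested latency (or the full buffer time if nothing was
 * requested) into the time we may sleep and the processing headroom reserved
 * by the watermark. */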
296 static pa_usec_t hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
297 pa_usec_t wm, usec;
298
299 pa_assert(u);
300
301 usec = pa_source_get_requested_latency_within_thread(u->source);
302
303 if (usec == (pa_usec_t) -1)
304 usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
305
306 wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
307
308 if (wm > usec)
309 wm = usec/2;
310
311 *sleep_usec = usec - wm;
312 *process_usec = wm;
313
314 #ifdef DEBUG_TIMING
315 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
316 (unsigned long) (usec / PA_USEC_PER_MSEC),
317 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
318 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
319 #endif
320
321 return usec;
322 }
323
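/* Log the ALSA error, let snd_pcm_recover() handle XRUNs and suspends, and
 * restart capture. Returns 0 on success, -1 if recovery failed. */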
324 static int try_recover(struct userdata *u, const char *call, int err) {
325 pa_assert(u);
326 pa_assert(call);
327 pa_assert(err < 0);
328
329 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
330
331 pa_assert(err != -EAGAIN);
332
333 if (err == -EPIPE)
334 pa_log_debug("%s: Buffer overrun!", call);
335
336 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
337 pa_log("%s: %s", call, pa_alsa_strerror(err));
338 return -1;
339 }
340
341 snd_pcm_start(u->pcm_handle);
342 return 0;
343 }
344
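/* Return how much room is left in the hardware buffer before it overruns;
 * on an overrun this is 0 and, with timer-based scheduling, the watermark
 * gets adjusted. */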
345 static size_t check_left_to_record(struct userdata *u, size_t n_bytes) {
346 size_t left_to_record;
347 size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
348
349 /* We use <= instead of < for this check here because an overrun
350      * only happens after the last sample was processed, not the moment
351      * it is removed from the buffer. This is particularly important
352 * when block transfer is used. */
353
354 if (n_bytes <= rec_space) {
355 left_to_record = rec_space - n_bytes;
356
357 #ifdef DEBUG_TIMING
358 pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
359 #endif
360
361 } else {
362 left_to_record = 0;
363
364 #ifdef DEBUG_TIMING
365 PA_DEBUG_TRAP;
366 #endif
367
368 if (pa_log_ratelimit())
369 pa_log_info("Overrun!");
370
371 if (u->use_tsched)
372 adjust_after_overrun(u);
373 }
374
375 return left_to_record;
376 }
377
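/* Read everything that is currently available from the mmap'ed hardware
 * buffer and post it to the source. Returns 1 if data was read, 0 if not,
 * negative on unrecoverable errors; *sleep_usec is set to the suggested
 * time until the next wakeup. */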
378 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
379 pa_bool_t work_done = FALSE;
380 pa_usec_t max_sleep_usec = 0, process_usec = 0;
381 size_t left_to_record;
382 unsigned j = 0;
383
384 pa_assert(u);
385 pa_source_assert_ref(u->source);
386
387 if (u->use_tsched)
388 hw_sleep_time(u, &max_sleep_usec, &process_usec);
389
390 for (;;) {
391 snd_pcm_sframes_t n;
392 size_t n_bytes;
393 int r;
394 pa_bool_t after_avail = TRUE;
395
396 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
397
398 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
399 continue;
400
401 return r;
402 }
403
404 n_bytes = (size_t) n * u->frame_size;
405
406 #ifdef DEBUG_TIMING
407 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
408 #endif
409
410 left_to_record = check_left_to_record(u, n_bytes);
411
412 if (u->use_tsched)
413 if (!polled &&
414 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
415 #ifdef DEBUG_TIMING
416 pa_log_debug("Not reading, because too early.");
417 #endif
418 break;
419 }
420
421 if (PA_UNLIKELY(n_bytes <= 0)) {
422
423 if (polled)
424 PA_ONCE_BEGIN {
425 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
426 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
427 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
428 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
429 pa_strnull(dn));
430 pa_xfree(dn);
431 } PA_ONCE_END;
432
433 #ifdef DEBUG_TIMING
434 pa_log_debug("Not reading, because not necessary.");
435 #endif
436 break;
437 }
438
439 if (++j > 10) {
440 #ifdef DEBUG_TIMING
441 pa_log_debug("Not filling up, because already too many iterations.");
442 #endif
443
444 break;
445 }
446
447 polled = FALSE;
448
449 #ifdef DEBUG_TIMING
450 pa_log_debug("Reading");
451 #endif
452
453 for (;;) {
454 int err;
455 const snd_pcm_channel_area_t *areas;
456 snd_pcm_uframes_t offset, frames;
457 pa_memchunk chunk;
458 void *p;
459 snd_pcm_sframes_t sframes;
460
461 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
462
463 /* pa_log_debug("%lu frames to read", (unsigned long) frames); */
464
465 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
466
467 if (!after_avail && err == -EAGAIN)
468 break;
469
470 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
471 continue;
472
473 return r;
474 }
475
476 /* Make sure that if these memblocks need to be copied they will fit into one slot */
477 if (frames > pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size)
478 frames = pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size;
479
480 if (!after_avail && frames == 0)
481 break;
482
483 pa_assert(frames > 0);
484 after_avail = FALSE;
485
486             /* Check that these are multiples of 8 bits */
487             pa_assert((areas[0].first & 7) == 0);
488             pa_assert((areas[0].step & 7) == 0);
489
490 /* We assume a single interleaved memory buffer */
491 pa_assert((areas[0].first >> 3) == 0);
492 pa_assert((areas[0].step >> 3) == u->frame_size);
493
494 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
495
496 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
497 chunk.length = pa_memblock_get_length(chunk.memblock);
498 chunk.index = 0;
499
500 pa_source_post(u->source, &chunk);
501 pa_memblock_unref_fixed(chunk.memblock);
502
503 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
504
505 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
506 continue;
507
508 return r;
509 }
510
511 work_done = TRUE;
512
513 u->read_count += frames * u->frame_size;
514
515 #ifdef DEBUG_TIMING
516 pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
517 #endif
518
519 if ((size_t) frames * u->frame_size >= n_bytes)
520 break;
521
522 n_bytes -= (size_t) frames * u->frame_size;
523 }
524 }
525
526 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
527
528 if (*sleep_usec > process_usec)
529 *sleep_usec -= process_usec;
530 else
531 *sleep_usec = 0;
532
533 return work_done ? 1 : 0;
534 }
535
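/* Like mmap_read(), but copies the data with snd_pcm_readi() into freshly
 * allocated memblocks, for devices that do not support mmap access. */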
536 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
537 int work_done = FALSE;
538 pa_usec_t max_sleep_usec = 0, process_usec = 0;
539 size_t left_to_record;
540 unsigned j = 0;
541
542 pa_assert(u);
543 pa_source_assert_ref(u->source);
544
545 if (u->use_tsched)
546 hw_sleep_time(u, &max_sleep_usec, &process_usec);
547
548 for (;;) {
549 snd_pcm_sframes_t n;
550 size_t n_bytes;
551 int r;
552 pa_bool_t after_avail = TRUE;
553
554 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
555
556 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
557 continue;
558
559 return r;
560 }
561
562 n_bytes = (size_t) n * u->frame_size;
563 left_to_record = check_left_to_record(u, n_bytes);
564
565 if (u->use_tsched)
566 if (!polled &&
567 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
568 break;
569
570 if (PA_UNLIKELY(n_bytes <= 0)) {
571
572 if (polled)
573 PA_ONCE_BEGIN {
574 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
575 pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
576 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
577 "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
578 pa_strnull(dn));
579 pa_xfree(dn);
580 } PA_ONCE_END;
581
582 break;
583 }
584
585 if (++j > 10) {
586 #ifdef DEBUG_TIMING
587 pa_log_debug("Not filling up, because already too many iterations.");
588 #endif
589
590 break;
591 }
592
593 polled = FALSE;
594
595 for (;;) {
596 void *p;
597 snd_pcm_sframes_t frames;
598 pa_memchunk chunk;
599
600 chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
601
602 frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
603
604 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
605 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
606
607 /* pa_log_debug("%lu frames to read", (unsigned long) n); */
608
609 p = pa_memblock_acquire(chunk.memblock);
610 frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
611 pa_memblock_release(chunk.memblock);
612
613 if (PA_UNLIKELY(frames < 0)) {
614 pa_memblock_unref(chunk.memblock);
615
616 if (!after_avail && (int) frames == -EAGAIN)
617 break;
618
619 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
620 continue;
621
622 return r;
623 }
624
625 if (!after_avail && frames == 0) {
626 pa_memblock_unref(chunk.memblock);
627 break;
628 }
629
630 pa_assert(frames > 0);
631 after_avail = FALSE;
632
633 chunk.index = 0;
634 chunk.length = (size_t) frames * u->frame_size;
635
636 pa_source_post(u->source, &chunk);
637 pa_memblock_unref(chunk.memblock);
638
639 work_done = TRUE;
640
641 u->read_count += frames * u->frame_size;
642
643 /* pa_log_debug("read %lu frames", (unsigned long) frames); */
644
645 if ((size_t) frames * u->frame_size >= n_bytes)
646 break;
647
648 n_bytes -= (size_t) frames * u->frame_size;
649 }
650 }
651
652 *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
653
654 if (*sleep_usec > process_usec)
655 *sleep_usec -= process_usec;
656 else
657 *sleep_usec = 0;
658
659 return work_done ? 1 : 0;
660 }
661
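/* Feed the time smoother with a new sample pair: the driver's timestamp (or
 * the current time if none is provided) and the sound card position derived
 * from the byte counter plus the current capture delay. */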
662 static void update_smoother(struct userdata *u) {
663 snd_pcm_sframes_t delay = 0;
664 uint64_t position;
665 int err;
666 pa_usec_t now1 = 0, now2;
667 snd_pcm_status_t *status;
668
669 snd_pcm_status_alloca(&status);
670
671 pa_assert(u);
672 pa_assert(u->pcm_handle);
673
674 /* Let's update the time smoother */
675
676 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
677 pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
678 return;
679 }
680
681 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
682 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
683 else {
684 snd_htimestamp_t htstamp = { 0, 0 };
685 snd_pcm_status_get_htstamp(status, &htstamp);
686 now1 = pa_timespec_load(&htstamp);
687 }
688
689 position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
690
691 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
692 if (now1 <= 0)
693 now1 = pa_rtclock_now();
694
695 now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
696
697 pa_smoother_put(u->smoother, now1, now2);
698 }
699
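/* Estimate the record latency as the difference between the smoothed sound
 * card time and the amount of data we have already read. */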
700 static pa_usec_t source_get_latency(struct userdata *u) {
701 int64_t delay;
702 pa_usec_t now1, now2;
703
704 pa_assert(u);
705
706 now1 = pa_rtclock_now();
707 now2 = pa_smoother_get(u->smoother, now1);
708
709 delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
710
711 return delay >= 0 ? (pa_usec_t) delay : 0;
712 }
713
714 static int build_pollfd(struct userdata *u) {
715 pa_assert(u);
716 pa_assert(u->pcm_handle);
717
718 if (u->alsa_rtpoll_item)
719 pa_rtpoll_item_free(u->alsa_rtpoll_item);
720
721 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
722 return -1;
723
724 return 0;
725 }
726
727 static int suspend(struct userdata *u) {
728 pa_assert(u);
729 pa_assert(u->pcm_handle);
730
731 pa_smoother_pause(u->smoother, pa_rtclock_now());
732
733 /* Let's suspend */
734 snd_pcm_close(u->pcm_handle);
735 u->pcm_handle = NULL;
736
737 if (u->alsa_rtpoll_item) {
738 pa_rtpoll_item_free(u->alsa_rtpoll_item);
739 u->alsa_rtpoll_item = NULL;
740 }
741
742 pa_log_info("Device suspended...");
743
744 return 0;
745 }
746
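/* Recompute how much of the hardware buffer we leave unused for the
 * currently requested latency, refresh the watermark and sleep times, and
 * program ALSA's avail_min accordingly. */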
747 static int update_sw_params(struct userdata *u) {
748 snd_pcm_uframes_t avail_min;
749 int err;
750
751 pa_assert(u);
752
753     /* Use the full buffer if no one asked us for anything specific */
754 u->hwbuf_unused = 0;
755
756 if (u->use_tsched) {
757 pa_usec_t latency;
758
759 if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
760 size_t b;
761
762 pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
763
764 b = pa_usec_to_bytes(latency, &u->source->sample_spec);
765
766 /* We need at least one sample in our buffer */
767
768 if (PA_UNLIKELY(b < u->frame_size))
769 b = u->frame_size;
770
771 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
772 }
773
774 fix_min_sleep_wakeup(u);
775 fix_tsched_watermark(u);
776 }
777
778 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
779
780 avail_min = 1;
781
782 if (u->use_tsched) {
783 pa_usec_t sleep_usec, process_usec;
784
785 hw_sleep_time(u, &sleep_usec, &process_usec);
786 avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
787 }
788
789 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
790
791 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
792 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
793 return err;
794 }
795
796 return 0;
797 }
798
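/* Reopen the PCM device and try to restore the previous hardware parameters
 * and access mode; fails (and closes the device again) if the original
 * configuration cannot be re-established. */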
799 static int unsuspend(struct userdata *u) {
800 pa_sample_spec ss;
801 int err;
802 pa_bool_t b, d;
803 unsigned nfrags;
804 snd_pcm_uframes_t period_size;
805
806 pa_assert(u);
807 pa_assert(!u->pcm_handle);
808
809 pa_log_info("Trying resume...");
810
811 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
812 /*SND_PCM_NONBLOCK|*/
813 SND_PCM_NO_AUTO_RESAMPLE|
814 SND_PCM_NO_AUTO_CHANNELS|
815 SND_PCM_NO_AUTO_FORMAT)) < 0) {
816 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
817 goto fail;
818 }
819
820 ss = u->source->sample_spec;
821 nfrags = u->nfragments;
822 period_size = u->fragment_size / u->frame_size;
823 b = u->use_mmap;
824 d = u->use_tsched;
825
826 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
827 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
828 goto fail;
829 }
830
831 if (b != u->use_mmap || d != u->use_tsched) {
832 pa_log_warn("Resume failed, couldn't get original access mode.");
833 goto fail;
834 }
835
836 if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
837 pa_log_warn("Resume failed, couldn't restore original sample settings.");
838 goto fail;
839 }
840
841 if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
842 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
843 (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
844 (unsigned long) nfrags, period_size * u->frame_size);
845 goto fail;
846 }
847
848 if (update_sw_params(u) < 0)
849 goto fail;
850
851 if (build_pollfd(u) < 0)
852 goto fail;
853
854 /* FIXME: We need to reload the volume somehow */
855
856 snd_pcm_start(u->pcm_handle);
857
858 u->read_count = 0;
859 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
860
861 pa_log_info("Resumed successfully...");
862
863 return 0;
864
865 fail:
866 if (u->pcm_handle) {
867 snd_pcm_close(u->pcm_handle);
868 u->pcm_handle = NULL;
869 }
870
871 return -1;
872 }
873
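/* Message handler running in the IO thread: answers latency queries and
 * performs the actual suspend/resume work on state changes. */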
874 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
875 struct userdata *u = PA_SOURCE(o)->userdata;
876
877 switch (code) {
878
879 case PA_SOURCE_MESSAGE_GET_LATENCY: {
880 pa_usec_t r = 0;
881
882 if (u->pcm_handle)
883 r = source_get_latency(u);
884
885 *((pa_usec_t*) data) = r;
886
887 return 0;
888 }
889
890 case PA_SOURCE_MESSAGE_SET_STATE:
891
892 switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
893
894 case PA_SOURCE_SUSPENDED:
895 pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
896
897 if (suspend(u) < 0)
898 return -1;
899
900 break;
901
902 case PA_SOURCE_IDLE:
903 case PA_SOURCE_RUNNING:
904
905 if (u->source->thread_info.state == PA_SOURCE_INIT) {
906 if (build_pollfd(u) < 0)
907 return -1;
908
909 snd_pcm_start(u->pcm_handle);
910 }
911
912 if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
913 if (unsuspend(u) < 0)
914 return -1;
915 }
916
917 break;
918
919 case PA_SOURCE_UNLINKED:
920 case PA_SOURCE_INIT:
921 case PA_SOURCE_INVALID_STATE:
922 ;
923 }
924
925 break;
926 }
927
928 return pa_source_process_msg(o, code, data, offset, chunk);
929 }
930
931 /* Called from main context */
932 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
933 pa_source_state_t old_state;
934 struct userdata *u;
935
936 pa_source_assert_ref(s);
937 pa_assert_se(u = s->userdata);
938
939 old_state = pa_source_get_state(u->source);
940
941     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
942         reserve_done(u);
943     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
944 if (reserve_init(u, u->device_name) < 0)
945 return -1;
946
947 return 0;
948 }
949
950 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
951 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
952
953 pa_assert(u);
954 pa_assert(u->mixer_handle);
955
956 if (mask == SND_CTL_EVENT_MASK_REMOVE)
957 return 0;
958
959 if (mask & SND_CTL_EVENT_MASK_VALUE) {
960 pa_source_get_volume(u->source, TRUE);
961 pa_source_get_mute(u->source, TRUE);
962 }
963
964 return 0;
965 }
966
967 static void source_get_volume_cb(pa_source *s) {
968 struct userdata *u = s->userdata;
969 pa_cvolume r;
970 char t[PA_CVOLUME_SNPRINT_MAX];
971
972 pa_assert(u);
973 pa_assert(u->mixer_path);
974 pa_assert(u->mixer_handle);
975
976 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
977 return;
978
979 /* Shift down by the base volume, so that 0dB becomes maximum volume */
980 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
981
982 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
983
984 if (pa_cvolume_equal(&u->hardware_volume, &r))
985 return;
986
987 s->virtual_volume = u->hardware_volume = r;
988
989 if (u->mixer_path->has_dB) {
990 pa_cvolume reset;
991
992 /* Hmm, so the hardware volume changed, let's reset our software volume */
993 pa_cvolume_reset(&reset, s->sample_spec.channels);
994 pa_source_set_soft_volume(s, &reset);
995 }
996 }
997
998 static void source_set_volume_cb(pa_source *s) {
999 struct userdata *u = s->userdata;
1000 pa_cvolume r;
1001 char t[PA_CVOLUME_SNPRINT_MAX];
1002
1003 pa_assert(u);
1004 pa_assert(u->mixer_path);
1005 pa_assert(u->mixer_handle);
1006
1007 /* Shift up by the base volume */
1008 pa_sw_cvolume_divide_scalar(&r, &s->virtual_volume, s->base_volume);
1009
1010 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1011 return;
1012
1013 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1014 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1015
1016 u->hardware_volume = r;
1017
1018 if (u->mixer_path->has_dB) {
1019
1020 /* Match exactly what the user requested by software */
1021 pa_sw_cvolume_divide(&s->soft_volume, &s->virtual_volume, &u->hardware_volume);
1022
1023 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->virtual_volume));
1024 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
1025 pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->soft_volume));
1026
1027 } else {
1028 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1029
1030 /* We can't match exactly what the user requested, hence let's
1031 * at least tell the user about it */
1032
1033 s->virtual_volume = r;
1034 }
1035 }
1036
1037 static void source_get_mute_cb(pa_source *s) {
1038 struct userdata *u = s->userdata;
1039 pa_bool_t b;
1040
1041 pa_assert(u);
1042 pa_assert(u->mixer_path);
1043 pa_assert(u->mixer_handle);
1044
1045 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1046 return;
1047
1048 s->muted = b;
1049 }
1050
1051 static void source_set_mute_cb(pa_source *s) {
1052 struct userdata *u = s->userdata;
1053
1054 pa_assert(u);
1055 pa_assert(u->mixer_path);
1056 pa_assert(u->mixer_handle);
1057
1058 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1059 }
1060
1061 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1062 struct userdata *u = s->userdata;
1063 pa_alsa_port_data *data;
1064
1065 pa_assert(u);
1066 pa_assert(p);
1067 pa_assert(u->mixer_handle);
1068
1069 data = PA_DEVICE_PORT_DATA(p);
1070
1071 pa_assert_se(u->mixer_path = data->path);
1072 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1073
1074 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1075 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1076 s->n_volume_steps = PA_VOLUME_NORM+1;
1077
1078 if (u->mixer_path->max_dB > 0.0)
1079 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1080 else
1081 pa_log_info("No particular base volume set, fixing to 0 dB");
1082 } else {
1083 s->base_volume = PA_VOLUME_NORM;
1084 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1085 }
1086
1087 if (data->setting)
1088 pa_alsa_setting_select(data->setting, u->mixer_handle);
1089
1090 if (s->set_mute)
1091 s->set_mute(s);
1092 if (s->set_volume)
1093 s->set_volume(s);
1094
1095 return 0;
1096 }
1097
1098 static void source_update_requested_latency_cb(pa_source *s) {
1099 struct userdata *u = s->userdata;
1100 pa_assert(u);
1101
1102 if (!u->pcm_handle)
1103 return;
1104
1105 update_sw_params(u);
1106 }
1107
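/* The IO thread: reads data whenever the source is opened, updates the
 * smoother, programs the next timer wakeup when timer-based scheduling is
 * used, and handles poll events and error recovery on the ALSA file
 * descriptors. */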
1108 static void thread_func(void *userdata) {
1109 struct userdata *u = userdata;
1110 unsigned short revents = 0;
1111
1112 pa_assert(u);
1113
1114 pa_log_debug("Thread starting up");
1115
1116 if (u->core->realtime_scheduling)
1117 pa_make_realtime(u->core->realtime_priority);
1118
1119 pa_thread_mq_install(&u->thread_mq);
1120
1121 for (;;) {
1122 int ret;
1123
1124 #ifdef DEBUG_TIMING
1125 pa_log_debug("Loop");
1126 #endif
1127
1128 /* Read some data and pass it to the sources */
1129 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1130 int work_done;
1131 pa_usec_t sleep_usec = 0;
1132
1133 if (u->use_mmap)
1134 work_done = mmap_read(u, &sleep_usec, revents & POLLIN);
1135 else
1136 work_done = unix_read(u, &sleep_usec, revents & POLLIN);
1137
1138 if (work_done < 0)
1139 goto fail;
1140
1141 /* pa_log_debug("work_done = %i", work_done); */
1142
1143 if (work_done)
1144 update_smoother(u);
1145
1146 if (u->use_tsched) {
1147 pa_usec_t cusec;
1148
1149 /* OK, the capture buffer is now empty, let's
1150 * calculate when to wake up next */
1151
1152 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1153
1154 /* Convert from the sound card time domain to the
1155 * system time domain */
1156 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1157
1158 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1159
1160                 /* We don't trust the conversion, so we wake up on whichever comes first */
1161 pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
1162 }
1163 } else if (u->use_tsched)
1164
1165 /* OK, we're in an invalid state, let's disable our timers */
1166 pa_rtpoll_set_timer_disabled(u->rtpoll);
1167
1168 /* Hmm, nothing to do. Let's sleep */
1169 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1170 goto fail;
1171
1172 if (ret == 0)
1173 goto finish;
1174
1175 /* Tell ALSA about this and process its response */
1176 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1177 struct pollfd *pollfd;
1178 int err;
1179 unsigned n;
1180
1181 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1182
1183 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1184 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1185 goto fail;
1186 }
1187
1188 if (revents & ~POLLIN) {
1189 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1190 goto fail;
1191
1192 snd_pcm_start(u->pcm_handle);
1193 } else if (revents && u->use_tsched && pa_log_ratelimit())
1194 pa_log_debug("Wakeup from ALSA!");
1195
1196 } else
1197 revents = 0;
1198 }
1199
1200 fail:
1201     /* If this was not a regular exit from the loop we have to continue
1202      * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1203 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1204 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1205
1206 finish:
1207 pa_log_debug("Thread shutting down");
1208 }
1209
1210 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1211 const char *n;
1212 char *t;
1213
1214 pa_assert(data);
1215 pa_assert(ma);
1216 pa_assert(device_name);
1217
1218 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1219 pa_source_new_data_set_name(data, n);
1220 data->namereg_fail = TRUE;
1221 return;
1222 }
1223
1224 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1225 data->namereg_fail = TRUE;
1226 else {
1227 n = device_id ? device_id : device_name;
1228 data->namereg_fail = FALSE;
1229 }
1230
1231 if (mapping)
1232 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1233 else
1234 t = pa_sprintf_malloc("alsa_input.%s", n);
1235
1236 pa_source_new_data_set_name(data, t);
1237 pa_xfree(t);
1238 }
1239
1240 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1241
1242 if (!mapping && !element)
1243 return;
1244
1245 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1246 pa_log_info("Failed to find a working mixer device.");
1247 return;
1248 }
1249
1250 if (element) {
1251
1252 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1253 goto fail;
1254
1255 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1256 goto fail;
1257
1258 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1259 pa_alsa_path_dump(u->mixer_path);
1260 } else {
1261
1262 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
1263 goto fail;
1264
1265 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1266
1267 pa_log_debug("Probed mixer paths:");
1268 pa_alsa_path_set_dump(u->mixer_path_set);
1269 }
1270
1271 return;
1272
1273 fail:
1274
1275 if (u->mixer_path_set) {
1276 pa_alsa_path_set_free(u->mixer_path_set);
1277 u->mixer_path_set = NULL;
1278 } else if (u->mixer_path) {
1279 pa_alsa_path_free(u->mixer_path);
1280 u->mixer_path = NULL;
1281 }
1282
1283 if (u->mixer_handle) {
1284 snd_mixer_close(u->mixer_handle);
1285 u->mixer_handle = NULL;
1286 }
1287 }
1288
1289 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1290 pa_assert(u);
1291
1292 if (!u->mixer_handle)
1293 return 0;
1294
1295 if (u->source->active_port) {
1296 pa_alsa_port_data *data;
1297
1298 /* We have a list of supported paths, so let's activate the
1299 * one that has been chosen as active */
1300
1301 data = PA_DEVICE_PORT_DATA(u->source->active_port);
1302 u->mixer_path = data->path;
1303
1304 pa_alsa_path_select(data->path, u->mixer_handle);
1305
1306 if (data->setting)
1307 pa_alsa_setting_select(data->setting, u->mixer_handle);
1308
1309 } else {
1310
1311 if (!u->mixer_path && u->mixer_path_set)
1312 u->mixer_path = u->mixer_path_set->paths;
1313
1314 if (u->mixer_path) {
1315         /* Hmm, we have only a single path, so let's activate it */
1316
1317 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1318
1319 if (u->mixer_path->settings)
1320 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1321 } else
1322 return 0;
1323 }
1324
1325 if (!u->mixer_path->has_volume)
1326 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1327 else {
1328
1329 if (u->mixer_path->has_dB) {
1330 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1331
1332 u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1333 u->source->n_volume_steps = PA_VOLUME_NORM+1;
1334
1335 if (u->mixer_path->max_dB > 0.0)
1336 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1337 else
1338 pa_log_info("No particular base volume set, fixing to 0 dB");
1339
1340 } else {
1341 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1342 u->source->base_volume = PA_VOLUME_NORM;
1343 u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1344 }
1345
1346 u->source->get_volume = source_get_volume_cb;
1347 u->source->set_volume = source_set_volume_cb;
1348
1349 u->source->flags |= PA_SOURCE_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SOURCE_DECIBEL_VOLUME : 0);
1350 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1351 }
1352
1353 if (!u->mixer_path->has_mute) {
1354 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1355 } else {
1356 u->source->get_mute = source_get_mute_cb;
1357 u->source->set_mute = source_set_mute_cb;
1358 u->source->flags |= PA_SOURCE_HW_MUTE_CTRL;
1359 pa_log_info("Using hardware mute control.");
1360 }
1361
1362 u->mixer_fdl = pa_alsa_fdlist_new();
1363
1364 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1365 pa_log("Failed to initialize file descriptor monitoring");
1366 return -1;
1367 }
1368
1369 if (u->mixer_path_set)
1370 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1371 else
1372 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1373
1374 return 0;
1375 }
1376
1377 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
1378
1379 struct userdata *u = NULL;
1380 const char *dev_id = NULL;
1381 pa_sample_spec ss, requested_ss;
1382 pa_channel_map map;
1383 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1384 snd_pcm_uframes_t period_frames, tsched_frames;
1385 size_t frame_size;
1386 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
1387 pa_source_new_data data;
1388 pa_alsa_profile_set *profile_set = NULL;
1389
1390 pa_assert(m);
1391 pa_assert(ma);
1392
1393 ss = m->core->default_sample_spec;
1394 map = m->core->default_channel_map;
1395 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1396 pa_log("Failed to parse sample specification");
1397 goto fail;
1398 }
1399
1400 requested_ss = ss;
1401 frame_size = pa_frame_size(&ss);
1402
1403 nfrags = m->core->default_n_fragments;
1404 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1405 if (frag_size <= 0)
1406 frag_size = (uint32_t) frame_size;
1407 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1408 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1409
1410 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1411 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1412 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1413 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1414 pa_log("Failed to parse buffer metrics");
1415 goto fail;
1416 }
1417
1418 hwbuf_size = frag_size * nfrags;
1419 period_frames = frag_size/frame_size;
1420 tsched_frames = tsched_size/frame_size;
1421
1422 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1423 pa_log("Failed to parse mmap argument.");
1424 goto fail;
1425 }
1426
1427 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1428 pa_log("Failed to parse timer_scheduling argument.");
1429 goto fail;
1430 }
1431
1432 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1433 pa_log("Failed to parse ignore_dB argument.");
1434 goto fail;
1435 }
1436
1437 if (use_tsched && !pa_rtclock_hrtimer()) {
1438 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1439 use_tsched = FALSE;
1440 }
1441
1442 u = pa_xnew0(struct userdata, 1);
1443 u->core = m->core;
1444 u->module = m;
1445 u->use_mmap = use_mmap;
1446 u->use_tsched = use_tsched;
1447 u->rtpoll = pa_rtpoll_new();
1448 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1449
1450 u->smoother = pa_smoother_new(
1451 DEFAULT_TSCHED_WATERMARK_USEC*2,
1452 DEFAULT_TSCHED_WATERMARK_USEC*2,
1453 TRUE,
1454 TRUE,
1455 5,
1456 pa_rtclock_now(),
1457 FALSE);
1458
1459 dev_id = pa_modargs_get_value(
1460 ma, "device_id",
1461 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1462
1463 if (reserve_init(u, dev_id) < 0)
1464 goto fail;
1465
1466 if (reserve_monitor_init(u, dev_id) < 0)
1467 goto fail;
1468
1469 b = use_mmap;
1470 d = use_tsched;
1471
1472 if (mapping) {
1473
1474 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1475 pa_log("device_id= not set");
1476 goto fail;
1477 }
1478
1479 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1480 dev_id,
1481 &u->device_name,
1482 &ss, &map,
1483 SND_PCM_STREAM_CAPTURE,
1484 &nfrags, &period_frames, tsched_frames,
1485 &b, &d, mapping)))
1486 goto fail;
1487
1488 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1489
1490 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1491 goto fail;
1492
1493 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1494 dev_id,
1495 &u->device_name,
1496 &ss, &map,
1497 SND_PCM_STREAM_CAPTURE,
1498 &nfrags, &period_frames, tsched_frames,
1499 &b, &d, profile_set, &mapping)))
1500 goto fail;
1501
1502 } else {
1503
1504 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1505 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1506 &u->device_name,
1507 &ss, &map,
1508 SND_PCM_STREAM_CAPTURE,
1509 &nfrags, &period_frames, tsched_frames,
1510 &b, &d, FALSE)))
1511 goto fail;
1512 }
1513
1514 pa_assert(u->device_name);
1515 pa_log_info("Successfully opened device %s.", u->device_name);
1516
1517 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1518 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1519 goto fail;
1520 }
1521
1522 if (mapping)
1523 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1524
1525 if (use_mmap && !b) {
1526 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1527 u->use_mmap = use_mmap = FALSE;
1528 }
1529
1530 if (use_tsched && (!b || !d)) {
1531 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1532 u->use_tsched = use_tsched = FALSE;
1533 }
1534
1535 if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
1536 pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
1537 u->use_tsched = use_tsched = FALSE;
1538 }
1539
1540 if (u->use_mmap)
1541 pa_log_info("Successfully enabled mmap() mode.");
1542
1543 if (u->use_tsched)
1544 pa_log_info("Successfully enabled timer-based scheduling mode.");
1545
1546 /* ALSA might tweak the sample spec, so recalculate the frame size */
1547 frame_size = pa_frame_size(&ss);
1548
1549 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1550
1551 pa_source_new_data_init(&data);
1552 data.driver = driver;
1553 data.module = m;
1554 data.card = card;
1555 set_source_name(&data, ma, dev_id, u->device_name, mapping);
1556 pa_source_new_data_set_sample_spec(&data, &ss);
1557 pa_source_new_data_set_channel_map(&data, &map);
1558
1559 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1560 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1561 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1562 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1563 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1564
1565 if (mapping) {
1566 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1567 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1568 }
1569
1570 pa_alsa_init_description(data.proplist);
1571
1572 if (u->control_device)
1573 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1574
1575 if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1576 pa_log("Invalid properties");
1577 pa_source_new_data_done(&data);
1578 goto fail;
1579 }
1580
1581 if (u->mixer_path_set)
1582 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1583
1584 u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1585 pa_source_new_data_done(&data);
1586
1587 if (!u->source) {
1588 pa_log("Failed to create source object");
1589 goto fail;
1590 }
1591
1592 u->source->parent.process_msg = source_process_msg;
1593 u->source->update_requested_latency = source_update_requested_latency_cb;
1594 u->source->set_state = source_set_state_cb;
1595 u->source->set_port = source_set_port_cb;
1596 u->source->userdata = u;
1597
1598 pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1599 pa_source_set_rtpoll(u->source, u->rtpoll);
1600
1601 u->frame_size = frame_size;
1602 u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
1603 u->nfragments = nfrags;
1604 u->hwbuf_size = u->fragment_size * nfrags;
1605 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);
1606 pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1607
1608 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1609 nfrags, (long unsigned) u->fragment_size,
1610 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1611
1612 if (u->use_tsched) {
1613 u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->source->sample_spec);
1614
1615 fix_min_sleep_wakeup(u);
1616 fix_tsched_watermark(u);
1617
1618 pa_source_set_latency_range(u->source,
1619 0,
1620 pa_bytes_to_usec(u->hwbuf_size, &ss));
1621
1622 pa_log_info("Time scheduling watermark is %0.2fms",
1623 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1624 } else
1625 pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
1626
1627 reserve_update(u);
1628
1629 if (update_sw_params(u) < 0)
1630 goto fail;
1631
1632 if (setup_mixer(u, ignore_dB) < 0)
1633 goto fail;
1634
1635 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1636
1637 if (!(u->thread = pa_thread_new(thread_func, u))) {
1638 pa_log("Failed to create thread.");
1639 goto fail;
1640 }
1641 /* Get initial mixer settings */
1642 if (data.volume_is_set) {
1643 if (u->source->set_volume)
1644 u->source->set_volume(u->source);
1645 } else {
1646 if (u->source->get_volume)
1647 u->source->get_volume(u->source);
1648 }
1649
1650 if (data.muted_is_set) {
1651 if (u->source->set_mute)
1652 u->source->set_mute(u->source);
1653 } else {
1654 if (u->source->get_mute)
1655 u->source->get_mute(u->source);
1656 }
1657
1658 pa_source_put(u->source);
1659
1660 if (profile_set)
1661 pa_alsa_profile_set_free(profile_set);
1662
1663 return u->source;
1664
1665 fail:
1666
1667 if (u)
1668 userdata_free(u);
1669
1670 if (profile_set)
1671 pa_alsa_profile_set_free(profile_set);
1672
1673 return NULL;
1674 }
1675
1676 static void userdata_free(struct userdata *u) {
1677 pa_assert(u);
1678
1679 if (u->source)
1680 pa_source_unlink(u->source);
1681
1682 if (u->thread) {
1683 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1684 pa_thread_free(u->thread);
1685 }
1686
1687 pa_thread_mq_done(&u->thread_mq);
1688
1689 if (u->source)
1690 pa_source_unref(u->source);
1691
1692 if (u->alsa_rtpoll_item)
1693 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1694
1695 if (u->rtpoll)
1696 pa_rtpoll_free(u->rtpoll);
1697
1698 if (u->pcm_handle) {
1699 snd_pcm_drop(u->pcm_handle);
1700 snd_pcm_close(u->pcm_handle);
1701 }
1702
1703 if (u->mixer_fdl)
1704 pa_alsa_fdlist_free(u->mixer_fdl);
1705
1706 if (u->mixer_path_set)
1707 pa_alsa_path_set_free(u->mixer_path_set);
1708 else if (u->mixer_path)
1709 pa_alsa_path_free(u->mixer_path);
1710
1711 if (u->mixer_handle)
1712 snd_mixer_close(u->mixer_handle);
1713
1714 if (u->smoother)
1715 pa_smoother_free(u->smoother);
1716
1717 reserve_done(u);
1718 monitor_done(u);
1719
1720 pa_xfree(u->device_name);
1721 pa_xfree(u->control_device);
1722 pa_xfree(u);
1723 }
1724
1725 void pa_alsa_source_free(pa_source *s) {
1726 struct userdata *u;
1727
1728 pa_source_assert_ref(s);
1729 pa_assert_se(u = s->userdata);
1730
1731 userdata_free(u);
1732 }