/* pulseaudio: src/modules/alsa/alsa-sink.c */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
66 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
67 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
68 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
69 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
70
/* Per-sink instance data, shared between the main thread and the IO thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;                  /* IO thread */
    pa_thread_mq thread_mq;             /* message queue between main and IO threads */
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;              /* ALSA PCM handle; NULL while suspended */

    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;           /* currently selected mixer path */

    pa_cvolume hardware_volume;         /* last volume read from/written to the hw mixer */

    size_t
        frame_size,                     /* bytes per audio frame */
        fragment_size,                  /* bytes per period */
        hwbuf_size,                     /* total hw buffer size in bytes */
        tsched_watermark,               /* refill when this much (or less) is left to play */
        hwbuf_unused,                   /* part of the hw buffer left unused to honour latency requests */
        min_sleep,                      /* lower bound on sleep length, in bytes */
        min_wakeup,                     /* wake up at least this many bytes before the buffer drains */
        watermark_step;                 /* watermark increment applied on underrun */

    unsigned nfragments;
    pa_memchunk memchunk;               /* partially written chunk (non-mmap path) */

    char *device_name;                  /* name of the PCM device */
    char *control_device;               /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1; /* mmap transfer / timer-based scheduling enabled */

    pa_bool_t first, after_rewind;      /* suppress underrun handling right after start/rewind */

    pa_rtpoll_item *alsa_rtpoll_item;   /* poll item watching the PCM descriptors */

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    pa_smoother *smoother;              /* time smoother for latency interpolation */
    uint64_t write_count;               /* total bytes written to the device */
    uint64_t since_start;               /* bytes written since last (re)start/recovery */

    pa_reserve_wrapper *reserve;        /* device reservation */
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor; /* watches whether someone else grabbed the device */
    pa_hook_slot *monitor_slot;
};
122
123 static void userdata_free(struct userdata *u);
124
125 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
126 pa_assert(r);
127 pa_assert(u);
128
129 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
130 return PA_HOOK_CANCEL;
131
132 return PA_HOOK_OK;
133 }
134
135 static void reserve_done(struct userdata *u) {
136 pa_assert(u);
137
138 if (u->reserve_slot) {
139 pa_hook_slot_free(u->reserve_slot);
140 u->reserve_slot = NULL;
141 }
142
143 if (u->reserve) {
144 pa_reserve_wrapper_unref(u->reserve);
145 u->reserve = NULL;
146 }
147 }
148
149 static void reserve_update(struct userdata *u) {
150 const char *description;
151 pa_assert(u);
152
153 if (!u->sink || !u->reserve)
154 return;
155
156 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
157 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
158 }
159
160 static int reserve_init(struct userdata *u, const char *dname) {
161 char *rname;
162
163 pa_assert(u);
164 pa_assert(dname);
165
166 if (u->reserve)
167 return 0;
168
169 if (pa_in_system_mode())
170 return 0;
171
172 if (!(rname = pa_alsa_get_reserve_name(dname)))
173 return 0;
174
175 /* We are resuming, try to lock the device */
176 u->reserve = pa_reserve_wrapper_get(u->core, rname);
177 pa_xfree(rname);
178
179 if (!(u->reserve))
180 return -1;
181
182 reserve_update(u);
183
184 pa_assert(!u->reserve_slot);
185 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
186
187 return 0;
188 }
189
190 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
191 pa_bool_t b;
192
193 pa_assert(w);
194 pa_assert(u);
195
196 b = PA_PTR_TO_UINT(busy) && !u->reserve;
197
198 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
199 return PA_HOOK_OK;
200 }
201
202 static void monitor_done(struct userdata *u) {
203 pa_assert(u);
204
205 if (u->monitor_slot) {
206 pa_hook_slot_free(u->monitor_slot);
207 u->monitor_slot = NULL;
208 }
209
210 if (u->monitor) {
211 pa_reserve_monitor_wrapper_unref(u->monitor);
212 u->monitor = NULL;
213 }
214 }
215
216 static int reserve_monitor_init(struct userdata *u, const char *dname) {
217 char *rname;
218
219 pa_assert(u);
220 pa_assert(dname);
221
222 if (pa_in_system_mode())
223 return 0;
224
225 if (!(rname = pa_alsa_get_reserve_name(dname)))
226 return 0;
227
228 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
229 pa_xfree(rname);
230
231 if (!(u->monitor))
232 return -1;
233
234 pa_assert(!u->monitor_slot);
235 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
236
237 return 0;
238 }
239
/* Recompute min_sleep/min_wakeup in bytes, clamping each into
 * [frame_size, half of the usable buffer]. */
static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);

    /* Usable part of the hw buffer, and a frame-aligned half of it */
    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}
254
255 static void fix_tsched_watermark(struct userdata *u) {
256 size_t max_use;
257 pa_assert(u);
258
259 max_use = u->hwbuf_size - u->hwbuf_unused;
260
261 if (u->tsched_watermark > max_use - u->min_sleep)
262 u->tsched_watermark = max_use - u->min_sleep;
263
264 if (u->tsched_watermark < u->min_wakeup)
265 u->tsched_watermark = u->min_wakeup;
266 }
267
/* React to an underrun when timer-based scheduling is in use: first try to
 * raise the wakeup watermark, and if that is already maxed out, raise the
 * sink's minimal latency instead. */
static void adjust_after_underrun(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    /* Grow by at most one step and at most a doubling, then re-clamp */
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_notice("Increasing wakeup watermark to %0.2f ms",
                      (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_notice("Increasing minimal latency to %0.2f ms",
                      (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
        return;
    }

    /* Neither the watermark nor the latency can be raised any further --
     * nothing left we can do about the underruns. */
}
301
/* Compute how long the IO thread may sleep (*sleep_usec) and how much time
 * is reserved for processing before the buffer would drain (*process_usec),
 * based on the requested latency and the current watermark. */
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    /* No specific latency requested: budget the whole hw buffer */
    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    /* The watermark must leave some room to sleep */
    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
330
/* Try to recover the PCM from an error state (e.g. underrun or suspend).
 * Returns 0 if recovery succeeded and the caller should retry the failed
 * operation, -1 if the error is fatal. */
static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    /* EAGAIN is not an error state -- callers must handle it themselves */
    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, pa_alsa_strerror(err));
        return -1;
    }

    /* The stream restarts from scratch after a successful recovery */
    u->first = TRUE;
    u->since_start = 0;
    return 0;
}
352
353 static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
354 size_t left_to_play;
355
356 /* We use <= instead of < for this check here because an underrun
357 * only happens after the last sample was processed, not already when
358 * it is removed from the buffer. This is particularly important
359 * when block transfer is used. */
360
361 if (n_bytes <= u->hwbuf_size) {
362 left_to_play = u->hwbuf_size - n_bytes;
363
364 #ifdef DEBUG_TIMING
365 pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
366 #endif
367
368 } else {
369 left_to_play = 0;
370
371 #ifdef DEBUG_TIMING
372 PA_DEBUG_TRAP;
373 #endif
374
375 if (!u->first && !u->after_rewind) {
376
377 if (pa_log_ratelimit())
378 pa_log_info("Underrun!");
379
380 if (u->use_tsched)
381 adjust_after_underrun(u);
382 }
383 }
384
385 return left_to_play;
386 }
387
388 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
389 pa_bool_t work_done = TRUE;
390 pa_usec_t max_sleep_usec = 0, process_usec = 0;
391 size_t left_to_play;
392 unsigned j = 0;
393
394 pa_assert(u);
395 pa_sink_assert_ref(u->sink);
396
397 if (u->use_tsched)
398 hw_sleep_time(u, &max_sleep_usec, &process_usec);
399
400 for (;;) {
401 snd_pcm_sframes_t n;
402 size_t n_bytes;
403 int r;
404
405 /* First we determine how many samples are missing to fill the
406 * buffer up to 100% */
407
408 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
409
410 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
411 continue;
412
413 return r;
414 }
415
416 n_bytes = (size_t) n * u->frame_size;
417
418 #ifdef DEBUG_TIMING
419 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
420 #endif
421
422 left_to_play = check_left_to_play(u, n_bytes);
423
424 if (u->use_tsched)
425
426 /* We won't fill up the playback buffer before at least
427 * half the sleep time is over because otherwise we might
428 * ask for more data from the clients then they expect. We
429 * need to guarantee that clients only have to keep around
430 * a single hw buffer length. */
431
432 if (!polled &&
433 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
434 #ifdef DEBUG_TIMING
435 pa_log_debug("Not filling up, because too early.");
436 #endif
437 break;
438 }
439
440 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
441
442 if (polled)
443 PA_ONCE_BEGIN {
444 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
445 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
446 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
447 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
448 pa_strnull(dn));
449 pa_xfree(dn);
450 } PA_ONCE_END;
451
452 #ifdef DEBUG_TIMING
453 pa_log_debug("Not filling up, because not necessary.");
454 #endif
455 break;
456 }
457
458
459 if (++j > 10) {
460 #ifdef DEBUG_TIMING
461 pa_log_debug("Not filling up, because already too many iterations.");
462 #endif
463
464 break;
465 }
466
467 n_bytes -= u->hwbuf_unused;
468 polled = FALSE;
469
470 #ifdef DEBUG_TIMING
471 pa_log_debug("Filling up");
472 #endif
473
474 for (;;) {
475 pa_memchunk chunk;
476 void *p;
477 int err;
478 const snd_pcm_channel_area_t *areas;
479 snd_pcm_uframes_t offset, frames;
480 snd_pcm_sframes_t sframes;
481
482 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
483 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
484
485 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
486
487 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
488 continue;
489
490 return r;
491 }
492
493 /* Make sure that if these memblocks need to be copied they will fit into one slot */
494 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
495 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
496
497 if (frames == 0)
498 break;
499
500 /* Check these are multiples of 8 bit */
501 pa_assert((areas[0].first & 7) == 0);
502 pa_assert((areas[0].step & 7)== 0);
503
504 /* We assume a single interleaved memory buffer */
505 pa_assert((areas[0].first >> 3) == 0);
506 pa_assert((areas[0].step >> 3) == u->frame_size);
507
508 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
509
510 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
511 chunk.length = pa_memblock_get_length(chunk.memblock);
512 chunk.index = 0;
513
514 pa_sink_render_into_full(u->sink, &chunk);
515 pa_memblock_unref_fixed(chunk.memblock);
516
517 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
518
519 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
520 continue;
521
522 return r;
523 }
524
525 work_done = TRUE;
526
527 u->write_count += frames * u->frame_size;
528 u->since_start += frames * u->frame_size;
529
530 #ifdef DEBUG_TIMING
531 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
532 #endif
533
534 if ((size_t) frames * u->frame_size >= n_bytes)
535 break;
536
537 n_bytes -= (size_t) frames * u->frame_size;
538 }
539 }
540
541 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
542
543 if (*sleep_usec > process_usec)
544 *sleep_usec -= process_usec;
545 else
546 *sleep_usec = 0;
547
548 return work_done ? 1 : 0;
549 }
550
/* Fill the playback buffer using snd_pcm_writei() (non-mmap path).
 * Returns 1 if any data was written, 0 if not, negative on unrecoverable
 * error. *sleep_usec is set to how long the caller may sleep afterwards. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0; /* outer-loop iteration guard */

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;

        /* How many samples are missing to fill the buffer up to 100%? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* POLLOUT was set but there's nothing to write -- driver bug */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Render a fresh chunk if the previous one was fully consumed */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (frames == 0)
                break;

            if (PA_UNLIKELY(frames < 0)) {

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            /* Advance past the bytes that were accepted by the device */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

    if (*sleep_usec > process_usec)
        *sleep_usec -= process_usec;
    else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
679
/* Feed the current (timestamp, playback position) pair into the time
 * smoother that is used for latency interpolation. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Bytes actually played = bytes written - bytes still queued in the hw */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);
}
720
721 static pa_usec_t sink_get_latency(struct userdata *u) {
722 pa_usec_t r;
723 int64_t delay;
724 pa_usec_t now1, now2;
725
726 pa_assert(u);
727
728 now1 = pa_rtclock_now();
729 now2 = pa_smoother_get(u->smoother, now1);
730
731 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
732
733 r = delay >= 0 ? (pa_usec_t) delay : 0;
734
735 if (u->memchunk.memblock)
736 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
737
738 return r;
739 }
740
741 static int build_pollfd(struct userdata *u) {
742 pa_assert(u);
743 pa_assert(u->pcm_handle);
744
745 if (u->alsa_rtpoll_item)
746 pa_rtpoll_item_free(u->alsa_rtpoll_item);
747
748 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
749 return -1;
750
751 return 0;
752 }
753
754 /* Called from IO context */
/* Called from IO context. Close the PCM device and tear down the poll item.
 * Always returns 0. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}
775
776 /* Called from IO context */
/* Called from IO context. Recompute hwbuf_unused and avail_min from the
 * currently requested latency and push the new sw params to ALSA.
 * Returns 0 on success, a negative ALSA error code on failure. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if noone asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);

    return 0;
}
831
832 /* Called from IO context */
/* Called from IO context. Reopen and reconfigure the PCM device after a
 * suspend, insisting on the exact configuration we had before. Returns 0 on
 * success, -1 if the device could not be restored with the same settings. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Ask for the same configuration we used before the suspend */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    /* Verify the device gave us back exactly what we had before */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* The stream starts from scratch again */
    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
903
904 /* Called from IO context */
/* Called from IO context: handle sink messages -- latency queries and the
 * state transitions that require opening/closing the PCM device. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no device to query; report 0 */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* Coming out of INIT the poll item hasn't been set up yet */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    /* Let the generic sink implementation handle everything else */
    return pa_sink_process_msg(o, code, data, offset, chunk);
}
959
960 /* Called from main context */
961 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
962 pa_sink_state_t old_state;
963 struct userdata *u;
964
965 pa_sink_assert_ref(s);
966 pa_assert_se(u = s->userdata);
967
968 old_state = pa_sink_get_state(u->sink);
969
970 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
971 reserve_done(u);
972 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
973 if (reserve_init(u, u->device_name) < 0)
974 return -1;
975
976 return 0;
977 }
978
979 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
980 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
981
982 pa_assert(u);
983 pa_assert(u->mixer_handle);
984
985 if (mask == SND_CTL_EVENT_MASK_REMOVE)
986 return 0;
987
988 if (mask & SND_CTL_EVENT_MASK_VALUE) {
989 pa_sink_get_volume(u->sink, TRUE, FALSE);
990 pa_sink_get_mute(u->sink, TRUE);
991 }
992
993 return 0;
994 }
995
/* Read the hardware volume from the mixer and propagate it to the sink's
 * virtual volume; if the path has dB info, also reset the software volume. */
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    /* Nothing changed, nothing to do */
    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->virtual_volume = u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume reset;

        /* Hmm, so the hardware volume changed, let's reset our software volume */
        pa_cvolume_reset(&reset, s->sample_spec.channels);
        pa_sink_set_soft_volume(s, &reset);
    }
}
1026
/* Write the requested virtual volume out to the hardware mixer. With dB
 * info available, the remainder that the hardware cannot represent exactly
 * is applied as software volume; otherwise the virtual volume is snapped to
 * whatever the hardware actually accepted. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->virtual_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&s->soft_volume, &s->virtual_volume, &u->hardware_volume);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->virtual_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->soft_volume));

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->virtual_volume = r;
    }
}
1065
1066 static void sink_get_mute_cb(pa_sink *s) {
1067 struct userdata *u = s->userdata;
1068 pa_bool_t b;
1069
1070 pa_assert(u);
1071 pa_assert(u->mixer_path);
1072 pa_assert(u->mixer_handle);
1073
1074 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1075 return;
1076
1077 s->muted = b;
1078 }
1079
/* Write s->muted out to the hardware mixer (best effort, no error report). */
static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}
1089
/* Switch the sink to a different port: select the corresponding mixer path,
 * recompute base volume and step count, and re-apply mute/volume. Always
 * returns 0. */
static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
        /* With dB info, scale so that the path's max_dB maps to 0dB */
        s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
        s->n_volume_steps = PA_VOLUME_NORM+1;

        if (u->mixer_path->max_dB > 0.0)
            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
        else
            pa_log_info("No particular base volume set, fixing to 0 dB");
    } else {
        s->base_volume = PA_VOLUME_NORM;
        s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
    }

    if (data->setting)
        pa_alsa_setting_select(data->setting, u->mixer_handle);

    /* Re-apply the current mute/volume state on the new path */
    if (s->set_mute)
        s->set_mute(s);
    if (s->set_volume)
        s->set_volume(s);

    return 0;
}
1126
/* Called when the requested latency changes: update the sw params and, if
 * the usable part of the buffer shrank, request a full rewind. */
static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    size_t before;
    pa_assert(u);

    /* Nothing to do while suspended */
    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
       buffer then before. If so, we need to make sure that subsequent
       rewinds are relative to the new maximum fill level and not to the
       current fill level. Thus, let's do a full rewind once, to clear
       things up. */

    if (u->hwbuf_unused > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}
1149
1150 static int process_rewind(struct userdata *u) {
1151 snd_pcm_sframes_t unused;
1152 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1153 pa_assert(u);
1154
1155 /* Figure out how much we shall rewind and reset the counter */
1156 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1157
1158 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1159
1160 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1161 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1162 return -1;
1163 }
1164
1165 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1166
1167 if (u->hwbuf_size > unused_nbytes)
1168 limit_nbytes = u->hwbuf_size - unused_nbytes;
1169 else
1170 limit_nbytes = 0;
1171
1172 if (rewind_nbytes > limit_nbytes)
1173 rewind_nbytes = limit_nbytes;
1174
1175 if (rewind_nbytes > 0) {
1176 snd_pcm_sframes_t in_frames, out_frames;
1177
1178 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1179
1180 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1181 pa_log_debug("before: %lu", (unsigned long) in_frames);
1182 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1183 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1184 return -1;
1185 }
1186 pa_log_debug("after: %lu", (unsigned long) out_frames);
1187
1188 rewind_nbytes = (size_t) out_frames * u->frame_size;
1189
1190 if (rewind_nbytes <= 0)
1191 pa_log_info("Tried rewind, but was apparently not possible.");
1192 else {
1193 u->write_count -= out_frames * u->frame_size;
1194 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1195 pa_sink_process_rewind(u->sink, rewind_nbytes);
1196
1197 u->after_rewind = TRUE;
1198 return 0;
1199 }
1200 } else
1201 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1202
1203 pa_sink_process_rewind(u->sink, 0);
1204 return 0;
1205 }
1206
/* Entry point of the sink's real-time I/O thread. Loops forever:
 * renders audio into the PCM device (mmap or read/write mode),
 * services rewind requests, programs the wakeup timer when
 * timer-based scheduling is active, then sleeps in the rtpoll and
 * reacts to ALSA poll events. Exits cleanly on a SHUTDOWN message, or
 * asks the core to unload the module on a fatal error. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    /* revents carries the previous iteration's ALSA poll result into
     * the next write call (POLLOUT hint). */
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                if (u->first) {
                    /* First write after (re)start: kick off the PCM
                     * stream and restart the timing smoother. */
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* ret == 0 means a SHUTDOWN message was processed. */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            if (revents & ~POLLOUT) {
                /* Error condition (e.g. POLLERR after an underrun):
                 * try to recover and restart playback from scratch. */
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1339
1340 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1341 const char *n;
1342 char *t;
1343
1344 pa_assert(data);
1345 pa_assert(ma);
1346 pa_assert(device_name);
1347
1348 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1349 pa_sink_new_data_set_name(data, n);
1350 data->namereg_fail = TRUE;
1351 return;
1352 }
1353
1354 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1355 data->namereg_fail = TRUE;
1356 else {
1357 n = device_id ? device_id : device_name;
1358 data->namereg_fail = FALSE;
1359 }
1360
1361 if (mapping)
1362 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1363 else
1364 t = pa_sprintf_malloc("alsa_output.%s", n);
1365
1366 pa_sink_new_data_set_name(data, t);
1367 pa_xfree(t);
1368 }
1369
1370 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1371
1372 if (!mapping && !element)
1373 return;
1374
1375 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1376 pa_log_info("Failed to find a working mixer device.");
1377 return;
1378 }
1379
1380 if (element) {
1381
1382 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1383 goto fail;
1384
1385 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1386 goto fail;
1387
1388 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1389 pa_alsa_path_dump(u->mixer_path);
1390 } else {
1391
1392 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1393 goto fail;
1394
1395 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1396
1397 pa_log_debug("Probed mixer paths:");
1398 pa_alsa_path_set_dump(u->mixer_path_set);
1399 }
1400
1401 return;
1402
1403 fail:
1404
1405 if (u->mixer_path_set) {
1406 pa_alsa_path_set_free(u->mixer_path_set);
1407 u->mixer_path_set = NULL;
1408 } else if (u->mixer_path) {
1409 pa_alsa_path_free(u->mixer_path);
1410 u->mixer_path = NULL;
1411 }
1412
1413 if (u->mixer_handle) {
1414 snd_mixer_close(u->mixer_handle);
1415 u->mixer_handle = NULL;
1416 }
1417 }
1418
/* Wire hardware volume/mute control into the sink, using the mixer
 * path (or path set) that find_mixer() probed earlier. Selects the
 * active path, fills in base_volume / n_volume_steps, installs the
 * get/set callbacks and flags, and starts mixer fd monitoring so
 * external volume changes are noticed.
 *
 * Returns 0 on success (including "no mixer, software control only"),
 * -1 if fd monitoring could not be set up. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* No mixer was found: stick with pure software volume/mute. */
    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        /* No active port: fall back to the first path of the set. */
        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* base_volume is the software volume matching the
             * hardware's 0 dB point. */
            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            /* No dB information: expose the raw hardware step range. */
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->sink->get_volume = sink_get_volume_cb;
        u->sink->set_volume = sink_set_volume_cb;

        u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    /* Watch the mixer's fds from the main loop so changes made by
     * other applications (e.g. alsamixer) are picked up. */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}
1506
/* Create a new ALSA playback sink.
 *
 * m       - owning module
 * ma      - parsed module arguments (device/device_id, buffer metrics,
 *           mmap=, tsched=, ignore_dB=, sink_name=, ...)
 * driver  - driver identifier stored in the sink's new-data
 * card    - card the sink belongs to, or NULL
 * mapping - ALSA profile mapping to use, or NULL (then device_id= picks
 *           a mapping automatically, or device= opens a raw device string)
 *
 * Returns the new sink, or NULL on failure; on failure everything
 * allocated so far is released via userdata_free(). The ordering below
 * matters: device reservation before open, sink creation before the
 * I/O thread, initial volume/mute sync before pa_sink_put(). */
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    /* b/d receive what the device actually granted for mmap/tsched. */
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    /* Sample spec and channel map: core defaults, overridable per
     * module argument. */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    /* Buffer metrics: defaults first, then module-argument overrides. */
    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Timer-based scheduling is pointless without hrtimers. */
    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            DEFAULT_TSCHED_BUFFER_USEC*2,
            DEFAULT_TSCHED_BUFFER_USEC*2,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    /* Acquire the device reservation before touching the hardware. */
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

    /* Open the PCM device: via an explicit mapping, via automatic
     * mapping selection for a device_id, or via a raw device string. */
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, mapping)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))

            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    /* Downgrade gracefully if the device refused mmap or tsched. */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
        pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    /* Assemble the sink's metadata (name, proplist, ports). */
    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    /* Cache the final buffer geometry in the userdata. */
    u->frame_size = frame_size;
    u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    /* Rescale the watermark in case ALSA changed the sample spec. */
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    pa_sink_set_max_rewind(u->sink, u->hwbuf_size);

    if (u->use_tsched) {
        u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
1812
/* Tear down everything in 'u', roughly in reverse order of
 * construction: unlink the sink so no new data arrives, stop the I/O
 * thread, then release the sink, ALSA handles, mixer resources and
 * reservations. Finally frees 'u' itself. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        /* Synchronously ask the I/O thread to shut down and join it. */
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        /* Drop queued samples so playback stops immediately. */
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* A path set owns its paths, so free u->mixer_path separately only
     * when it was synthesized stand-alone (element= case). */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
1864
1865 void pa_alsa_sink_free(pa_sink *s) {
1866 struct userdata *u;
1867
1868 pa_sink_assert_ref(s);
1869 pa_assert_se(u = s->userdata);
1870
1871 userdata_free(u);
1872 }