1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Digital Audio (PCM) abstract layer
4 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
8 #include <linux/module.h>
9 #include <linux/file.h>
10 #include <linux/slab.h>
11 #include <linux/sched/signal.h>
12 #include <linux/time.h>
13 #include <linux/pm_qos.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/vmalloc.h>
17 #include <sound/core.h>
18 #include <sound/control.h>
19 #include <sound/info.h>
20 #include <sound/pcm.h>
21 #include <sound/pcm_params.h>
22 #include <sound/timer.h>
23 #include <sound/minors.h>
24 #include <linux/uio.h>
25 #include <linux/delay.h>
27 #include "pcm_local.h"
29 #ifdef CONFIG_SND_DEBUG
30 #define CREATE_TRACE_POINTS
31 #include "pcm_param_trace.h"
33 #define trace_hw_mask_param_enabled() 0
34 #define trace_hw_interval_param_enabled() 0
35 #define trace_hw_mask_param(substream, type, index, prev, curr)
36 #define trace_hw_interval_param(substream, type, index, prev, curr)
43 struct snd_pcm_hw_params_old {
45 unsigned int masks[SNDRV_PCM_HW_PARAM_SUBFORMAT -
46 SNDRV_PCM_HW_PARAM_ACCESS + 1];
47 struct snd_interval intervals[SNDRV_PCM_HW_PARAM_TICK_TIME -
48 SNDRV_PCM_HW_PARAM_SAMPLE_BITS + 1];
53 unsigned int rate_num;
54 unsigned int rate_den;
55 snd_pcm_uframes_t fifo_size;
56 unsigned char reserved[64];
59 #ifdef CONFIG_SND_SUPPORT_OLD_API
60 #define SNDRV_PCM_IOCTL_HW_REFINE_OLD _IOWR('A', 0x10, struct snd_pcm_hw_params_old)
61 #define SNDRV_PCM_IOCTL_HW_PARAMS_OLD _IOWR('A', 0x11, struct snd_pcm_hw_params_old)
63 static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
64 struct snd_pcm_hw_params_old __user * _oparams);
65 static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
66 struct snd_pcm_hw_params_old __user * _oparams);
68 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
74 static DECLARE_RWSEM(snd_pcm_link_rwsem);
76 void snd_pcm_group_init(struct snd_pcm_group *group)
78 spin_lock_init(&group->lock);
79 mutex_init(&group->mutex);
80 INIT_LIST_HEAD(&group->substreams);
81 refcount_set(&group->refs, 1);
84 /* define group lock helpers */
85 #define DEFINE_PCM_GROUP_LOCK(action, mutex_action) \
86 static void snd_pcm_group_ ## action(struct snd_pcm_group *group, bool nonatomic) \
89 mutex_ ## mutex_action(&group->mutex); \
91 spin_ ## action(&group->lock); \
94 DEFINE_PCM_GROUP_LOCK(lock, lock);
95 DEFINE_PCM_GROUP_LOCK(unlock, unlock);
96 DEFINE_PCM_GROUP_LOCK(lock_irq, lock);
97 DEFINE_PCM_GROUP_LOCK(unlock_irq, unlock);
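/*
 * For reference, a rough sketch of what one of the instantiations above
 * expands to (assuming the elided macro body selects the mutex for
 * nonatomic PCMs and the spinlock otherwise); illustrative only, not the
 * literal preprocessor output:
 *
 *	static void snd_pcm_group_lock(struct snd_pcm_group *group,
 *				       bool nonatomic)
 *	{
 *		if (nonatomic)
 *			mutex_lock(&group->mutex);
 *		else
 *			spin_lock(&group->lock);
 *	}
 */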
100 * snd_pcm_stream_lock - Lock the PCM stream
101 * @substream: PCM substream
103 * This locks the PCM stream's spinlock or mutex depending on the nonatomic
104 * flag of the given substream. This also takes the global link rw lock
105 * (or rw sem), too, for avoiding the race with linked streams.
107 void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
109 snd_pcm_group_lock(&substream->self_group, substream->pcm->nonatomic);
111 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
114 * snd_pcm_stream_unlock - Unlock the PCM stream
115 * @substream: PCM substream
117 * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
119 void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
121 snd_pcm_group_unlock(&substream->self_group, substream->pcm->nonatomic);
123 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
126 * snd_pcm_stream_lock_irq - Lock the PCM stream
127 * @substream: PCM substream
129 * This locks the PCM stream like snd_pcm_stream_lock() and disables the local
130 * IRQ (only when nonatomic is false). In the nonatomic case, this is identical
131 * to snd_pcm_stream_lock().
133 void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
135 snd_pcm_group_lock_irq(&substream->self_group,
136 substream->pcm->nonatomic);
138 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
141 * snd_pcm_stream_unlock_irq - Unlock the PCM stream
142 * @substream: PCM substream
144 * This is a counter-part of snd_pcm_stream_lock_irq().
146 void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
148 snd_pcm_group_unlock_irq(&substream->self_group,
149 substream->pcm->nonatomic);
151 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
153 unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
155 unsigned long flags = 0;
156 if (substream->pcm->nonatomic)
157 mutex_lock(&substream->self_group.mutex);
159 spin_lock_irqsave(&substream->self_group.lock, flags);
162 EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
165 * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
166 * @substream: PCM substream
169 * This is a counter-part of snd_pcm_stream_lock_irqsave().
171 void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
174 if (substream->pcm->nonatomic)
175 mutex_unlock(&substream->self_group.mutex);
177 spin_unlock_irqrestore(&substream->self_group.lock, flags);
179 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
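/*
 * Typical usage (a minimal sketch, not taken from a real caller): any access
 * to the running stream state is done with the stream lock held, using the
 * variant matching the calling context, e.g.
 *
 *	snd_pcm_stream_lock_irq(substream);
 *	... inspect or modify substream->runtime ...
 *	snd_pcm_stream_unlock_irq(substream);
 *
 * When the caller cannot know whether IRQs are already disabled, the
 * snd_pcm_stream_lock_irqsave()/snd_pcm_stream_unlock_irqrestore() pair
 * is the safe choice.
 */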
181 /* Run PCM ioctl ops */
182 static int snd_pcm_ops_ioctl(struct snd_pcm_substream *substream,
183 unsigned cmd, void *arg)
185 if (substream->ops->ioctl)
186 return substream->ops->ioctl(substream, cmd, arg);
188 return snd_pcm_lib_ioctl(substream, cmd, arg);
191 int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
193 struct snd_pcm *pcm = substream->pcm;
194 struct snd_pcm_str *pstr = substream->pstr;
196 memset(info, 0, sizeof(*info));
197 info->card = pcm->card->number;
198 info->device = pcm->device;
199 info->stream = substream->stream;
200 info->subdevice = substream->number;
201 strlcpy(info->id, pcm->id, sizeof(info->id));
202 strlcpy(info->name, pcm->name, sizeof(info->name));
203 info->dev_class = pcm->dev_class;
204 info->dev_subclass = pcm->dev_subclass;
205 info->subdevices_count = pstr->substream_count;
206 info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
207 strlcpy(info->subname, substream->name, sizeof(info->subname));
212 int snd_pcm_info_user(struct snd_pcm_substream *substream,
213 struct snd_pcm_info __user * _info)
215 struct snd_pcm_info *info;
218 info = kmalloc(sizeof(*info), GFP_KERNEL);
221 err = snd_pcm_info(substream, info);
223 if (copy_to_user(_info, info, sizeof(*info)))
230 static bool hw_support_mmap(struct snd_pcm_substream *substream)
232 if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
235 if (substream->ops->mmap ||
236 (substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV &&
237 substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV_UC))
240 return dma_can_mmap(substream->dma_buffer.dev.dev);
243 static int constrain_mask_params(struct snd_pcm_substream *substream,
244 struct snd_pcm_hw_params *params)
246 struct snd_pcm_hw_constraints *constrs =
247 &substream->runtime->hw_constraints;
250 struct snd_mask old_mask;
253 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
254 m = hw_param_mask(params, k);
255 if (snd_mask_empty(m))
258 /* The caller did not request a change of this parameter. */
259 if (!(params->rmask & (1 << k)))
262 if (trace_hw_mask_param_enabled())
265 changed = snd_mask_refine(m, constrs_mask(constrs, k));
271 /* Set corresponding flag so that the caller gets it. */
272 trace_hw_mask_param(substream, k, 0, &old_mask, m);
273 params->cmask |= 1 << k;
279 static int constrain_interval_params(struct snd_pcm_substream *substream,
280 struct snd_pcm_hw_params *params)
282 struct snd_pcm_hw_constraints *constrs =
283 &substream->runtime->hw_constraints;
284 struct snd_interval *i;
286 struct snd_interval old_interval;
289 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
290 i = hw_param_interval(params, k);
291 if (snd_interval_empty(i))
294 /* The caller did not request a change of this parameter. */
295 if (!(params->rmask & (1 << k)))
298 if (trace_hw_interval_param_enabled())
301 changed = snd_interval_refine(i, constrs_interval(constrs, k));
307 /* Set corresponding flag so that the caller gets it. */
308 trace_hw_interval_param(substream, k, 0, &old_interval, i);
309 params->cmask |= 1 << k;
315 static int constrain_params_by_rules(struct snd_pcm_substream *substream,
316 struct snd_pcm_hw_params *params)
318 struct snd_pcm_hw_constraints *constrs =
319 &substream->runtime->hw_constraints;
321 unsigned int *rstamps;
322 unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
324 struct snd_pcm_hw_rule *r;
326 struct snd_mask old_mask;
327 struct snd_interval old_interval;
329 int changed, err = 0;
332 * Each application of a rule has its own sequence number.
334 * Each member of the 'rstamps' array holds the sequence number of the
335 * most recent application of the corresponding rule.
337 rstamps = kcalloc(constrs->rules_num, sizeof(unsigned int), GFP_KERNEL);
342 * Each member of the 'vstamps' array holds the sequence number of the
343 * most recent rule application in which the corresponding parameter was changed.
346 * Initially, the elements corresponding to parameters requested by the
347 * caller are set to 1. For unrequested parameters, the corresponding
348 * members stay at 0 so that those parameters are never changed.
350 for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
351 vstamps[k] = (params->rmask & (1 << k)) ? 1 : 0;
353 /* Due to the above design, actual sequence number starts at 2. */
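/*
 * Worked example (hypothetical request): if the caller requested only RATE,
 * then vstamps[RATE] = 1 and rstamps[] is all zero. A rule k listing RATE in
 * its deps[] sees vstamps[RATE] (1) > rstamps[k] (0) and is applied;
 * rstamps[k] is then set to the current stamp, and if the rule narrowed its
 * target parameter r->var, vstamps[r->var] is bumped to the same stamp so
 * that rules depending on r->var are re-evaluated in a later pass.
 */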
356 /* Apply all rules in order. */
358 for (k = 0; k < constrs->rules_num; k++) {
359 r = &constrs->rules[k];
362 * Check the condition bits of this rule. When the rule has
363 * condition bits, it is applied only when the corresponding bit
364 * is also set in params->flags. SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP
365 * is an example of such a condition bit.
367 if (r->cond && !(r->cond & params->flags))
371 * The 'deps' array includes at most three dependencies
372 * on SNDRV_PCM_HW_PARAM_XXXs for this rule. The fourth
373 * member of this array is a sentinel and must be a negative value.
376 * The rule needs to be applied now if any of its dependent
377 * parameters was changed by an earlier application of another rule.
380 for (d = 0; r->deps[d] >= 0; d++) {
381 if (vstamps[r->deps[d]] > rstamps[k])
387 if (trace_hw_mask_param_enabled()) {
388 if (hw_is_mask(r->var))
389 old_mask = *hw_param_mask(params, r->var);
391 if (trace_hw_interval_param_enabled()) {
392 if (hw_is_interval(r->var))
393 old_interval = *hw_param_interval(params, r->var);
396 changed = r->func(params, r);
403 * When the parameter was changed, notify the caller via the
404 * corresponding bit in cmask, then prepare for the next iteration.
407 if (changed && r->var >= 0) {
408 if (hw_is_mask(r->var)) {
409 trace_hw_mask_param(substream, r->var,
411 hw_param_mask(params, r->var));
413 if (hw_is_interval(r->var)) {
414 trace_hw_interval_param(substream, r->var,
415 k + 1, &old_interval,
416 hw_param_interval(params, r->var));
419 params->cmask |= (1 << r->var);
420 vstamps[r->var] = stamp;
424 rstamps[k] = stamp++;
427 /* Iterate to evaluate all rules until no parameter is changed any more. */
436 static int fixup_unreferenced_params(struct snd_pcm_substream *substream,
437 struct snd_pcm_hw_params *params)
439 const struct snd_interval *i;
440 const struct snd_mask *m;
443 if (!params->msbits) {
444 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
445 if (snd_interval_single(i))
446 params->msbits = snd_interval_value(i);
449 if (!params->rate_den) {
450 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
451 if (snd_interval_single(i)) {
452 params->rate_num = snd_interval_value(i);
453 params->rate_den = 1;
457 if (!params->fifo_size) {
458 m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
459 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
460 if (snd_mask_single(m) && snd_interval_single(i)) {
461 err = snd_pcm_ops_ioctl(substream,
462 SNDRV_PCM_IOCTL1_FIFO_SIZE,
470 params->info = substream->runtime->hw.info;
471 params->info &= ~(SNDRV_PCM_INFO_FIFO_IN_FRAMES |
472 SNDRV_PCM_INFO_DRAIN_TRIGGER);
473 if (!hw_support_mmap(substream))
474 params->info &= ~(SNDRV_PCM_INFO_MMAP |
475 SNDRV_PCM_INFO_MMAP_VALID);
481 int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
482 struct snd_pcm_hw_params *params)
487 params->fifo_size = 0;
488 if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
490 if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_RATE)) {
491 params->rate_num = 0;
492 params->rate_den = 0;
495 err = constrain_mask_params(substream, params);
499 err = constrain_interval_params(substream, params);
503 err = constrain_params_by_rules(substream, params);
511 EXPORT_SYMBOL(snd_pcm_hw_refine);
513 static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
514 struct snd_pcm_hw_params __user * _params)
516 struct snd_pcm_hw_params *params;
519 params = memdup_user(_params, sizeof(*params));
521 return PTR_ERR(params);
523 err = snd_pcm_hw_refine(substream, params);
527 err = fixup_unreferenced_params(substream, params);
531 if (copy_to_user(_params, params, sizeof(*params)))
538 static int period_to_usecs(struct snd_pcm_runtime *runtime)
543 return -1; /* invalid */
545 /* take 75% of period time as the deadline */
546 usecs = (750000 / runtime->rate) * runtime->period_size;
547 usecs += ((750000 % runtime->rate) * runtime->period_size) /
548 runtime->rate;
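/*
 * Worked example (hypothetical values): at 48000 Hz with a 1024-frame
 * period, the period time is about 21333 us and 75% of it is 16000 us:
 * (750000 / 48000) * 1024 = 15 * 1024 = 15360, plus
 * ((750000 % 48000) * 1024) / 48000 = (30000 * 1024) / 48000 = 640,
 * giving 16000 us for the PM QoS latency request made in
 * snd_pcm_hw_params() below.
 */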
553 static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state)
555 snd_pcm_stream_lock_irq(substream);
556 if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
557 substream->runtime->status->state = state;
558 snd_pcm_stream_unlock_irq(substream);
561 static inline void snd_pcm_timer_notify(struct snd_pcm_substream *substream,
564 #ifdef CONFIG_SND_PCM_TIMER
565 if (substream->timer)
566 snd_timer_notify(substream->timer, event,
567 &substream->runtime->trigger_tstamp);
571 static void snd_pcm_sync_stop(struct snd_pcm_substream *substream)
573 if (substream->runtime->stop_operating) {
574 substream->runtime->stop_operating = false;
575 if (substream->ops->sync_stop)
576 substream->ops->sync_stop(substream);
577 else if (substream->pcm->card->sync_irq > 0)
578 synchronize_irq(substream->pcm->card->sync_irq);
583 * snd_pcm_hw_params_choose - choose a configuration defined by @params
585 * @params: the hw_params instance
587 * Choose one configuration from configuration space defined by @params.
588 * The configuration is chosen by fixing parameters in this order:
589 * first access, first format, first subformat, min channels,
590 * min rate, min period time, max buffer size, min tick time
592 * Return: Zero if successful, or a negative error code on failure.
594 static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
595 struct snd_pcm_hw_params *params)
597 static const int vars[] = {
598 SNDRV_PCM_HW_PARAM_ACCESS,
599 SNDRV_PCM_HW_PARAM_FORMAT,
600 SNDRV_PCM_HW_PARAM_SUBFORMAT,
601 SNDRV_PCM_HW_PARAM_CHANNELS,
602 SNDRV_PCM_HW_PARAM_RATE,
603 SNDRV_PCM_HW_PARAM_PERIOD_TIME,
604 SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
605 SNDRV_PCM_HW_PARAM_TICK_TIME,
609 struct snd_mask old_mask;
610 struct snd_interval old_interval;
613 for (v = vars; *v != -1; v++) {
614 /* Keep old parameter to trace. */
615 if (trace_hw_mask_param_enabled()) {
617 old_mask = *hw_param_mask(params, *v);
619 if (trace_hw_interval_param_enabled()) {
620 if (hw_is_interval(*v))
621 old_interval = *hw_param_interval(params, *v);
623 if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
624 changed = snd_pcm_hw_param_first(pcm, params, *v, NULL);
626 changed = snd_pcm_hw_param_last(pcm, params, *v, NULL);
632 /* Trace the changed parameter. */
633 if (hw_is_mask(*v)) {
634 trace_hw_mask_param(pcm, *v, 0, &old_mask,
635 hw_param_mask(params, *v));
637 if (hw_is_interval(*v)) {
638 trace_hw_interval_param(pcm, *v, 0, &old_interval,
639 hw_param_interval(params, *v));
646 static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
647 struct snd_pcm_hw_params *params)
649 struct snd_pcm_runtime *runtime;
652 snd_pcm_uframes_t frames;
654 if (PCM_RUNTIME_CHECK(substream))
656 runtime = substream->runtime;
657 snd_pcm_stream_lock_irq(substream);
658 switch (runtime->status->state) {
659 case SNDRV_PCM_STATE_OPEN:
660 case SNDRV_PCM_STATE_SETUP:
661 case SNDRV_PCM_STATE_PREPARED:
664 snd_pcm_stream_unlock_irq(substream);
667 snd_pcm_stream_unlock_irq(substream);
668 #if IS_ENABLED(CONFIG_SND_PCM_OSS)
669 if (!substream->oss.oss)
671 if (atomic_read(&substream->mmap_count))
674 snd_pcm_sync_stop(substream);
677 err = snd_pcm_hw_refine(substream, params);
681 err = snd_pcm_hw_params_choose(substream, params);
685 err = fixup_unreferenced_params(substream, params);
689 if (substream->managed_buffer_alloc) {
690 err = snd_pcm_lib_malloc_pages(substream,
691 params_buffer_bytes(params));
694 runtime->buffer_changed = err > 0;
697 if (substream->ops->hw_params != NULL) {
698 err = substream->ops->hw_params(substream, params);
703 runtime->access = params_access(params);
704 runtime->format = params_format(params);
705 runtime->subformat = params_subformat(params);
706 runtime->channels = params_channels(params);
707 runtime->rate = params_rate(params);
708 runtime->period_size = params_period_size(params);
709 runtime->periods = params_periods(params);
710 runtime->buffer_size = params_buffer_size(params);
711 runtime->info = params->info;
712 runtime->rate_num = params->rate_num;
713 runtime->rate_den = params->rate_den;
714 runtime->no_period_wakeup =
715 (params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
716 (params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
718 bits = snd_pcm_format_physical_width(runtime->format);
719 runtime->sample_bits = bits;
720 bits *= runtime->channels;
721 runtime->frame_bits = bits;
723 while (bits % 8 != 0) {
727 runtime->byte_align = bits / 8;
728 runtime->min_align = frames;
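/*
 * Example (hypothetical format/channel count): for S16_LE with two channels,
 * sample_bits = 16 and frame_bits = 32; since 32 is already a multiple of 8,
 * byte_align = 4 bytes and min_align = 1 frame. The loop above only matters
 * for sub-byte sample formats, where several frames are needed to reach a
 * whole number of bytes.
 */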
730 /* Default sw params */
731 runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE;
732 runtime->period_step = 1;
733 runtime->control->avail_min = runtime->period_size;
734 runtime->start_threshold = 1;
735 runtime->stop_threshold = runtime->buffer_size;
736 runtime->silence_threshold = 0;
737 runtime->silence_size = 0;
738 runtime->boundary = runtime->buffer_size;
739 while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
740 runtime->boundary *= 2;
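/*
 * Example (hypothetical sizes): with buffer_size = 1024 frames on a 64-bit
 * kernel, boundary is doubled from 1024 up to roughly 2^62 frames, i.e. the
 * largest power-of-two multiple of the buffer size that still fits safely
 * below LONG_MAX; hw_ptr and appl_ptr wrap at this boundary rather than at
 * the buffer size itself.
 */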
742 snd_pcm_timer_resolution_change(substream);
743 snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
745 if (pm_qos_request_active(&substream->latency_pm_qos_req))
746 pm_qos_remove_request(&substream->latency_pm_qos_req);
747 if ((usecs = period_to_usecs(runtime)) >= 0)
748 pm_qos_add_request(&substream->latency_pm_qos_req,
749 PM_QOS_CPU_DMA_LATENCY, usecs);
752 /* hardware might be unusable from this time,
753 so we force application to retry to set
754 the correct hardware parameter settings */
755 snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
756 if (substream->ops->hw_free != NULL)
757 substream->ops->hw_free(substream);
758 if (substream->managed_buffer_alloc)
759 snd_pcm_lib_free_pages(substream);
763 static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
764 struct snd_pcm_hw_params __user * _params)
766 struct snd_pcm_hw_params *params;
769 params = memdup_user(_params, sizeof(*params));
771 return PTR_ERR(params);
773 err = snd_pcm_hw_params(substream, params);
777 if (copy_to_user(_params, params, sizeof(*params)))
784 static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
786 struct snd_pcm_runtime *runtime;
789 if (PCM_RUNTIME_CHECK(substream))
791 runtime = substream->runtime;
792 snd_pcm_stream_lock_irq(substream);
793 switch (runtime->status->state) {
794 case SNDRV_PCM_STATE_SETUP:
795 case SNDRV_PCM_STATE_PREPARED:
798 snd_pcm_stream_unlock_irq(substream);
801 snd_pcm_stream_unlock_irq(substream);
802 if (atomic_read(&substream->mmap_count))
804 snd_pcm_sync_stop(substream);
805 if (substream->ops->hw_free)
806 result = substream->ops->hw_free(substream);
807 if (substream->managed_buffer_alloc)
808 snd_pcm_lib_free_pages(substream);
809 snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
810 pm_qos_remove_request(&substream->latency_pm_qos_req);
814 static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
815 struct snd_pcm_sw_params *params)
817 struct snd_pcm_runtime *runtime;
820 if (PCM_RUNTIME_CHECK(substream))
822 runtime = substream->runtime;
823 snd_pcm_stream_lock_irq(substream);
824 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
825 snd_pcm_stream_unlock_irq(substream);
828 snd_pcm_stream_unlock_irq(substream);
830 if (params->tstamp_mode < 0 ||
831 params->tstamp_mode > SNDRV_PCM_TSTAMP_LAST)
833 if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12) &&
834 params->tstamp_type > SNDRV_PCM_TSTAMP_TYPE_LAST)
836 if (params->avail_min == 0)
838 if (params->silence_size >= runtime->boundary) {
839 if (params->silence_threshold != 0)
842 if (params->silence_size > params->silence_threshold)
844 if (params->silence_threshold > runtime->buffer_size)
848 snd_pcm_stream_lock_irq(substream);
849 runtime->tstamp_mode = params->tstamp_mode;
850 if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12))
851 runtime->tstamp_type = params->tstamp_type;
852 runtime->period_step = params->period_step;
853 runtime->control->avail_min = params->avail_min;
854 runtime->start_threshold = params->start_threshold;
855 runtime->stop_threshold = params->stop_threshold;
856 runtime->silence_threshold = params->silence_threshold;
857 runtime->silence_size = params->silence_size;
858 params->boundary = runtime->boundary;
859 if (snd_pcm_running(substream)) {
860 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
861 runtime->silence_size > 0)
862 snd_pcm_playback_silence(substream, ULONG_MAX);
863 err = snd_pcm_update_state(substream, runtime);
865 snd_pcm_stream_unlock_irq(substream);
869 static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream,
870 struct snd_pcm_sw_params __user * _params)
872 struct snd_pcm_sw_params params;
874 if (copy_from_user(&params, _params, sizeof(params)))
876 err = snd_pcm_sw_params(substream, &params);
877 if (copy_to_user(_params, &params, sizeof(params)))
882 static inline snd_pcm_uframes_t
883 snd_pcm_calc_delay(struct snd_pcm_substream *substream)
885 snd_pcm_uframes_t delay;
887 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
888 delay = snd_pcm_playback_hw_avail(substream->runtime);
890 delay = snd_pcm_capture_avail(substream->runtime);
891 return delay + substream->runtime->delay;
894 int snd_pcm_status64(struct snd_pcm_substream *substream,
895 struct snd_pcm_status64 *status)
897 struct snd_pcm_runtime *runtime = substream->runtime;
899 snd_pcm_stream_lock_irq(substream);
901 snd_pcm_unpack_audio_tstamp_config(status->audio_tstamp_data,
902 &runtime->audio_tstamp_config);
904 /* backwards compatible behavior */
905 if (runtime->audio_tstamp_config.type_requested ==
906 SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT) {
907 if (runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK)
908 runtime->audio_tstamp_config.type_requested =
909 SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
911 runtime->audio_tstamp_config.type_requested =
912 SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
913 runtime->audio_tstamp_report.valid = 0;
915 runtime->audio_tstamp_report.valid = 1;
917 status->state = runtime->status->state;
918 status->suspended_state = runtime->status->suspended_state;
919 if (status->state == SNDRV_PCM_STATE_OPEN)
921 status->trigger_tstamp_sec = runtime->trigger_tstamp.tv_sec;
922 status->trigger_tstamp_nsec = runtime->trigger_tstamp.tv_nsec;
923 if (snd_pcm_running(substream)) {
924 snd_pcm_update_hw_ptr(substream);
925 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
926 status->tstamp_sec = runtime->status->tstamp.tv_sec;
927 status->tstamp_nsec =
928 runtime->status->tstamp.tv_nsec;
929 status->driver_tstamp_sec =
930 runtime->driver_tstamp.tv_sec;
931 status->driver_tstamp_nsec =
932 runtime->driver_tstamp.tv_nsec;
933 status->audio_tstamp_sec =
934 runtime->status->audio_tstamp.tv_sec;
935 status->audio_tstamp_nsec =
936 runtime->status->audio_tstamp.tv_nsec;
937 if (runtime->audio_tstamp_report.valid == 1)
938 /* backwards compatibility, no report provided in COMPAT mode */
939 snd_pcm_pack_audio_tstamp_report(&status->audio_tstamp_data,
940 &status->audio_tstamp_accuracy,
941 &runtime->audio_tstamp_report);
946 /* get tstamp only in fallback mode and only if enabled */
947 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
948 struct timespec64 tstamp;
950 snd_pcm_gettime(runtime, &tstamp);
951 status->tstamp_sec = tstamp.tv_sec;
952 status->tstamp_nsec = tstamp.tv_nsec;
956 status->appl_ptr = runtime->control->appl_ptr;
957 status->hw_ptr = runtime->status->hw_ptr;
958 status->avail = snd_pcm_avail(substream);
959 status->delay = snd_pcm_running(substream) ?
960 snd_pcm_calc_delay(substream) : 0;
961 status->avail_max = runtime->avail_max;
962 status->overrange = runtime->overrange;
963 runtime->avail_max = 0;
964 runtime->overrange = 0;
966 snd_pcm_stream_unlock_irq(substream);
970 static int snd_pcm_status_user64(struct snd_pcm_substream *substream,
971 struct snd_pcm_status64 __user * _status,
974 struct snd_pcm_status64 status;
977 memset(&status, 0, sizeof(status));
979 * with extension, parameters are read/write,
980 * get audio_tstamp_data from user,
981 * ignore rest of status structure
983 if (ext && get_user(status.audio_tstamp_data,
984 (u32 __user *)(&_status->audio_tstamp_data)))
986 res = snd_pcm_status64(substream, &status);
989 if (copy_to_user(_status, &status, sizeof(status)))
994 static int snd_pcm_status_user32(struct snd_pcm_substream *substream,
995 struct snd_pcm_status32 __user * _status,
998 struct snd_pcm_status64 status64;
999 struct snd_pcm_status32 status32;
1002 memset(&status64, 0, sizeof(status64));
1003 memset(&status32, 0, sizeof(status32));
1005 * with extension, parameters are read/write,
1006 * get audio_tstamp_data from user,
1007 * ignore rest of status structure
1009 if (ext && get_user(status64.audio_tstamp_data,
1010 (u32 __user *)(&_status->audio_tstamp_data)))
1012 res = snd_pcm_status64(substream, &status64);
1016 status32 = (struct snd_pcm_status32) {
1017 .state = status64.state,
1018 .trigger_tstamp_sec = status64.trigger_tstamp_sec,
1019 .trigger_tstamp_nsec = status64.trigger_tstamp_nsec,
1020 .tstamp_sec = status64.tstamp_sec,
1021 .tstamp_nsec = status64.tstamp_nsec,
1022 .appl_ptr = status64.appl_ptr,
1023 .hw_ptr = status64.hw_ptr,
1024 .delay = status64.delay,
1025 .avail = status64.avail,
1026 .avail_max = status64.avail_max,
1027 .overrange = status64.overrange,
1028 .suspended_state = status64.suspended_state,
1029 .audio_tstamp_data = status64.audio_tstamp_data,
1030 .audio_tstamp_sec = status64.audio_tstamp_sec,
1031 .audio_tstamp_nsec = status64.audio_tstamp_nsec,
1032 .driver_tstamp_sec = status64.driver_tstamp_sec,
1033 .driver_tstamp_nsec = status64.driver_tstamp_nsec,
1034 .audio_tstamp_accuracy = status64.audio_tstamp_accuracy,
1037 if (copy_to_user(_status, &status32, sizeof(status32)))
1043 static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
1044 struct snd_pcm_channel_info * info)
1046 struct snd_pcm_runtime *runtime;
1047 unsigned int channel;
1049 channel = info->channel;
1050 runtime = substream->runtime;
1051 snd_pcm_stream_lock_irq(substream);
1052 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
1053 snd_pcm_stream_unlock_irq(substream);
1056 snd_pcm_stream_unlock_irq(substream);
1057 if (channel >= runtime->channels)
1059 memset(info, 0, sizeof(*info));
1060 info->channel = channel;
1061 return snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
1064 static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
1065 struct snd_pcm_channel_info __user * _info)
1067 struct snd_pcm_channel_info info;
1070 if (copy_from_user(&info, _info, sizeof(info)))
1072 res = snd_pcm_channel_info(substream, &info);
1075 if (copy_to_user(_info, &info, sizeof(info)))
1080 static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
1082 struct snd_pcm_runtime *runtime = substream->runtime;
1083 if (runtime->trigger_master == NULL)
1085 if (runtime->trigger_master == substream) {
1086 if (!runtime->trigger_tstamp_latched)
1087 snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
1089 snd_pcm_trigger_tstamp(runtime->trigger_master);
1090 runtime->trigger_tstamp = runtime->trigger_master->runtime->trigger_tstamp;
1092 runtime->trigger_master = NULL;
1096 int (*pre_action)(struct snd_pcm_substream *substream, int state);
1097 int (*do_action)(struct snd_pcm_substream *substream, int state);
1098 void (*undo_action)(struct snd_pcm_substream *substream, int state);
1099 void (*post_action)(struct snd_pcm_substream *substream, int state);
1103 * this function is the core for handling linked streams
1104 * Note: the stream state might be changed also on failure
1105 * Note2: call with calling stream lock + link lock
1107 static int snd_pcm_action_group(const struct action_ops *ops,
1108 struct snd_pcm_substream *substream,
1109 int state, int do_lock)
1111 struct snd_pcm_substream *s = NULL;
1112 struct snd_pcm_substream *s1;
1113 int res = 0, depth = 1;
1115 snd_pcm_group_for_each_entry(s, substream) {
1116 if (do_lock && s != substream) {
1117 if (s->pcm->nonatomic)
1118 mutex_lock_nested(&s->self_group.mutex, depth);
1120 spin_lock_nested(&s->self_group.lock, depth);
1123 res = ops->pre_action(s, state);
1127 snd_pcm_group_for_each_entry(s, substream) {
1128 res = ops->do_action(s, state);
1130 if (ops->undo_action) {
1131 snd_pcm_group_for_each_entry(s1, substream) {
1132 if (s1 == s) /* failed stream */
1134 ops->undo_action(s1, state);
1137 s = NULL; /* unlock all */
1141 snd_pcm_group_for_each_entry(s, substream) {
1142 ops->post_action(s, state);
1146 /* unlock streams */
1147 snd_pcm_group_for_each_entry(s1, substream) {
1148 if (s1 != substream) {
1149 if (s1->pcm->nonatomic)
1150 mutex_unlock(&s1->self_group.mutex);
1152 spin_unlock(&s1->self_group.lock);
1154 if (s1 == s) /* end */
1162 * Note: call with stream lock
1164 static int snd_pcm_action_single(const struct action_ops *ops,
1165 struct snd_pcm_substream *substream,
1170 res = ops->pre_action(substream, state);
1173 res = ops->do_action(substream, state);
1175 ops->post_action(substream, state);
1176 else if (ops->undo_action)
1177 ops->undo_action(substream, state);
1181 static void snd_pcm_group_assign(struct snd_pcm_substream *substream,
1182 struct snd_pcm_group *new_group)
1184 substream->group = new_group;
1185 list_move(&substream->link_list, &new_group->substreams);
1189 * Unref and unlock the group, but keep the stream lock;
1190 * when the group becomes empty and is no longer referenced, it is freed
1192 static void snd_pcm_group_unref(struct snd_pcm_group *group,
1193 struct snd_pcm_substream *substream)
1199 do_free = refcount_dec_and_test(&group->refs);
1200 snd_pcm_group_unlock(group, substream->pcm->nonatomic);
1206 * Lock the group inside a stream lock and reference it;
1207 * return the locked group object, or NULL if not linked
1209 static struct snd_pcm_group *
1210 snd_pcm_stream_group_ref(struct snd_pcm_substream *substream)
1212 bool nonatomic = substream->pcm->nonatomic;
1213 struct snd_pcm_group *group;
1217 if (!snd_pcm_stream_linked(substream))
1219 group = substream->group;
1220 /* block freeing the group object */
1221 refcount_inc(&group->refs);
1223 trylock = nonatomic ? mutex_trylock(&group->mutex) :
1224 spin_trylock(&group->lock);
1228 /* re-lock in the proper order to avoid an ABBA deadlock */
1229 snd_pcm_stream_unlock(substream);
1230 snd_pcm_group_lock(group, nonatomic);
1231 snd_pcm_stream_lock(substream);
1233 /* check the group again; the above opens a small race window */
1234 if (substream->group == group)
1236 /* group changed, try again */
1237 snd_pcm_group_unref(group, substream);
1243 * Note: call with stream lock
1245 static int snd_pcm_action(const struct action_ops *ops,
1246 struct snd_pcm_substream *substream,
1249 struct snd_pcm_group *group;
1252 group = snd_pcm_stream_group_ref(substream);
1254 res = snd_pcm_action_group(ops, substream, state, 1);
1256 res = snd_pcm_action_single(ops, substream, state);
1257 snd_pcm_group_unref(group, substream);
1262 * Note: don't use any locks before
1264 static int snd_pcm_action_lock_irq(const struct action_ops *ops,
1265 struct snd_pcm_substream *substream,
1270 snd_pcm_stream_lock_irq(substream);
1271 res = snd_pcm_action(ops, substream, state);
1272 snd_pcm_stream_unlock_irq(substream);
1278 static int snd_pcm_action_nonatomic(const struct action_ops *ops,
1279 struct snd_pcm_substream *substream,
1284 /* Guarantee the group members won't change during non-atomic action */
1285 down_read(&snd_pcm_link_rwsem);
1286 if (snd_pcm_stream_linked(substream))
1287 res = snd_pcm_action_group(ops, substream, state, 0);
1289 res = snd_pcm_action_single(ops, substream, state);
1290 up_read(&snd_pcm_link_rwsem);
1297 static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state)
1299 struct snd_pcm_runtime *runtime = substream->runtime;
1300 if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
1302 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1303 !snd_pcm_playback_data(substream))
1305 runtime->trigger_tstamp_latched = false;
1306 runtime->trigger_master = substream;
1310 static int snd_pcm_do_start(struct snd_pcm_substream *substream, int state)
1312 if (substream->runtime->trigger_master != substream)
1314 return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
1317 static void snd_pcm_undo_start(struct snd_pcm_substream *substream, int state)
1319 if (substream->runtime->trigger_master == substream)
1320 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1323 static void snd_pcm_post_start(struct snd_pcm_substream *substream, int state)
1325 struct snd_pcm_runtime *runtime = substream->runtime;
1326 snd_pcm_trigger_tstamp(substream);
1327 runtime->hw_ptr_jiffies = jiffies;
1328 runtime->hw_ptr_buffer_jiffies = (runtime->buffer_size * HZ) /
1330 runtime->status->state = state;
1331 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1332 runtime->silence_size > 0)
1333 snd_pcm_playback_silence(substream, ULONG_MAX);
1334 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTART);
1337 static const struct action_ops snd_pcm_action_start = {
1338 .pre_action = snd_pcm_pre_start,
1339 .do_action = snd_pcm_do_start,
1340 .undo_action = snd_pcm_undo_start,
1341 .post_action = snd_pcm_post_start
1345 * snd_pcm_start - start all linked streams
1346 * @substream: the PCM substream instance
1348 * Return: Zero if successful, or a negative error code.
1349 * The stream lock must be acquired before calling this function.
1351 int snd_pcm_start(struct snd_pcm_substream *substream)
1353 return snd_pcm_action(&snd_pcm_action_start, substream,
1354 SNDRV_PCM_STATE_RUNNING);
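/*
 * Minimal usage sketch (hypothetical caller, not taken from this file):
 * snd_pcm_start() expects the stream lock to be held already, e.g.
 *
 *	snd_pcm_stream_lock_irq(substream);
 *	err = snd_pcm_start(substream);
 *	snd_pcm_stream_unlock_irq(substream);
 *
 * Callers that do not hold the lock can use snd_pcm_start_lock_irq() below.
 */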
1357 /* take the stream lock and start the streams */
1358 static int snd_pcm_start_lock_irq(struct snd_pcm_substream *substream)
1360 return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream,
1361 SNDRV_PCM_STATE_RUNNING);
1367 static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state)
1369 struct snd_pcm_runtime *runtime = substream->runtime;
1370 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
1372 runtime->trigger_master = substream;
1376 static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state)
1378 if (substream->runtime->trigger_master == substream &&
1379 snd_pcm_running(substream))
1380 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1381 return 0; /* unconditionally stop all substreams */
1384 static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state)
1386 struct snd_pcm_runtime *runtime = substream->runtime;
1387 if (runtime->status->state != state) {
1388 snd_pcm_trigger_tstamp(substream);
1389 runtime->status->state = state;
1390 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTOP);
1392 runtime->stop_operating = true;
1393 wake_up(&runtime->sleep);
1394 wake_up(&runtime->tsleep);
1397 static const struct action_ops snd_pcm_action_stop = {
1398 .pre_action = snd_pcm_pre_stop,
1399 .do_action = snd_pcm_do_stop,
1400 .post_action = snd_pcm_post_stop
1404 * snd_pcm_stop - try to stop all running streams in the substream group
1405 * @substream: the PCM substream instance
1406 * @state: PCM state after stopping the stream
1408 * The state of each stream is then changed to the given state unconditionally.
1410 * Return: Zero if successful, or a negative error code.
1412 int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
1414 return snd_pcm_action(&snd_pcm_action_stop, substream, state);
1416 EXPORT_SYMBOL(snd_pcm_stop);
1419 * snd_pcm_drain_done - stop the DMA only when the given stream is playback
1420 * @substream: the PCM substream
1422 * After stopping, the state is changed to SETUP.
1423 * Unlike snd_pcm_stop(), this affects only the given stream.
1425 * Return: Zero if successful, or a negative error code.
1427 int snd_pcm_drain_done(struct snd_pcm_substream *substream)
1429 return snd_pcm_action_single(&snd_pcm_action_stop, substream,
1430 SNDRV_PCM_STATE_SETUP);
1434 * snd_pcm_stop_xrun - stop the running streams as XRUN
1435 * @substream: the PCM substream instance
1437 * This stops the given running substream (and all linked substreams) as XRUN.
1438 * Unlike snd_pcm_stop(), this function takes the substream lock by itself.
1440 * Return: Zero if successful, or a negative error code.
1442 int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
1444 unsigned long flags;
1446 snd_pcm_stream_lock_irqsave(substream, flags);
1447 if (substream->runtime && snd_pcm_running(substream))
1448 __snd_pcm_xrun(substream);
1449 snd_pcm_stream_unlock_irqrestore(substream, flags);
1452 EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
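/*
 * Minimal usage sketch (hypothetical driver code): snd_pcm_stop_xrun() takes
 * the stream lock by itself, so a driver may call it directly when it detects
 * an unrecoverable error, e.g. in its interrupt handler:
 *
 *	if (fifo_error)
 *		snd_pcm_stop_xrun(substream);
 *
 * The substream and all linked substreams then end up in the XRUN state.
 */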
1457 static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push)
1459 struct snd_pcm_runtime *runtime = substream->runtime;
1460 if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
1463 if (runtime->status->state != SNDRV_PCM_STATE_RUNNING)
1465 } else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED)
1467 runtime->trigger_master = substream;
1471 static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
1473 if (substream->runtime->trigger_master != substream)
1475 /* some drivers might use hw_ptr to recover from the pause -
1476 update the hw_ptr now */
1478 snd_pcm_update_hw_ptr(substream);
1479 /* The pointer sanity check in snd_pcm_update_hw_ptr*() is based on the
1480 * delta from the current jiffies; pushing hw_ptr_jiffies far into the past
1481 * makes that delta large enough to effectively skip the check once.
1483 substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
1484 return substream->ops->trigger(substream,
1485 push ? SNDRV_PCM_TRIGGER_PAUSE_PUSH :
1486 SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
1489 static void snd_pcm_undo_pause(struct snd_pcm_substream *substream, int push)
1491 if (substream->runtime->trigger_master == substream)
1492 substream->ops->trigger(substream,
1493 push ? SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
1494 SNDRV_PCM_TRIGGER_PAUSE_PUSH);
1497 static void snd_pcm_post_pause(struct snd_pcm_substream *substream, int push)
1499 struct snd_pcm_runtime *runtime = substream->runtime;
1500 snd_pcm_trigger_tstamp(substream);
1502 runtime->status->state = SNDRV_PCM_STATE_PAUSED;
1503 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MPAUSE);
1504 wake_up(&runtime->sleep);
1505 wake_up(&runtime->tsleep);
1507 runtime->status->state = SNDRV_PCM_STATE_RUNNING;
1508 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MCONTINUE);
1512 static const struct action_ops snd_pcm_action_pause = {
1513 .pre_action = snd_pcm_pre_pause,
1514 .do_action = snd_pcm_do_pause,
1515 .undo_action = snd_pcm_undo_pause,
1516 .post_action = snd_pcm_post_pause
1520 * Push/release the pause for all linked streams.
1522 static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
1524 return snd_pcm_action(&snd_pcm_action_pause, substream, push);
1530 static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
1532 struct snd_pcm_runtime *runtime = substream->runtime;
1533 switch (runtime->status->state) {
1534 case SNDRV_PCM_STATE_SUSPENDED:
1536 /* unresumable PCM state; return -EBUSY for skipping suspend */
1537 case SNDRV_PCM_STATE_OPEN:
1538 case SNDRV_PCM_STATE_SETUP:
1539 case SNDRV_PCM_STATE_DISCONNECTED:
1542 runtime->trigger_master = substream;
1546 static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state)
1548 struct snd_pcm_runtime *runtime = substream->runtime;
1549 if (runtime->trigger_master != substream)
1551 if (! snd_pcm_running(substream))
1553 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1554 return 0; /* suspend unconditionally */
1557 static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state)
1559 struct snd_pcm_runtime *runtime = substream->runtime;
1560 snd_pcm_trigger_tstamp(substream);
1561 runtime->status->suspended_state = runtime->status->state;
1562 runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
1563 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSUSPEND);
1564 wake_up(&runtime->sleep);
1565 wake_up(&runtime->tsleep);
1568 static const struct action_ops snd_pcm_action_suspend = {
1569 .pre_action = snd_pcm_pre_suspend,
1570 .do_action = snd_pcm_do_suspend,
1571 .post_action = snd_pcm_post_suspend
1575 * snd_pcm_suspend - trigger SUSPEND to all linked streams
1576 * @substream: the PCM substream
1578 * After this call, all streams are changed to SUSPENDED state.
1580 * Return: Zero if successful, or a negative error code.
1582 static int snd_pcm_suspend(struct snd_pcm_substream *substream)
1585 unsigned long flags;
1587 snd_pcm_stream_lock_irqsave(substream, flags);
1588 err = snd_pcm_action(&snd_pcm_action_suspend, substream, 0);
1589 snd_pcm_stream_unlock_irqrestore(substream, flags);
1594 * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm
1595 * @pcm: the PCM instance
1597 * After this call, all streams are changed to SUSPENDED state.
1599 * Return: Zero if successful (or @pcm is %NULL), or a negative error code.
1601 int snd_pcm_suspend_all(struct snd_pcm *pcm)
1603 struct snd_pcm_substream *substream;
1604 int stream, err = 0;
1609 for (stream = 0; stream < 2; stream++) {
1610 for (substream = pcm->streams[stream].substream;
1611 substream; substream = substream->next) {
1612 /* FIXME: the open/close code should lock this as well */
1613 if (substream->runtime == NULL)
1617 * Skip BE dai link PCMs that are internal and may
1618 * not have their substream ops set.
1620 if (!substream->ops)
1623 err = snd_pcm_suspend(substream);
1624 if (err < 0 && err != -EBUSY)
1630 EXPORT_SYMBOL(snd_pcm_suspend_all);
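/*
 * Minimal usage sketch (hypothetical driver and names, not part of this
 * file): older-style drivers call this from their system-suspend handler
 * before powering down the hardware, e.g.
 *
 *	static int mychip_suspend(struct device *dev)
 *	{
 *		struct mychip *chip = dev_get_drvdata(dev);
 *
 *		snd_pcm_suspend_all(chip->pcm);
 *		return 0;
 *	}
 */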
1634 static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state)
1636 struct snd_pcm_runtime *runtime = substream->runtime;
1637 if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
1639 runtime->trigger_master = substream;
1643 static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state)
1645 struct snd_pcm_runtime *runtime = substream->runtime;
1646 if (runtime->trigger_master != substream)
1648 /* DMA not running previously? */
1649 if (runtime->status->suspended_state != SNDRV_PCM_STATE_RUNNING &&
1650 (runtime->status->suspended_state != SNDRV_PCM_STATE_DRAINING ||
1651 substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
1653 return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME);
1656 static void snd_pcm_undo_resume(struct snd_pcm_substream *substream, int state)
1658 if (substream->runtime->trigger_master == substream &&
1659 snd_pcm_running(substream))
1660 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1663 static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state)
1665 struct snd_pcm_runtime *runtime = substream->runtime;
1666 snd_pcm_trigger_tstamp(substream);
1667 runtime->status->state = runtime->status->suspended_state;
1668 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MRESUME);
1669 snd_pcm_sync_stop(substream);
1672 static const struct action_ops snd_pcm_action_resume = {
1673 .pre_action = snd_pcm_pre_resume,
1674 .do_action = snd_pcm_do_resume,
1675 .undo_action = snd_pcm_undo_resume,
1676 .post_action = snd_pcm_post_resume
1679 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1681 return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 0);
1686 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1691 #endif /* CONFIG_PM */
1696 * Change the RUNNING stream(s) to XRUN state.
1698 static int snd_pcm_xrun(struct snd_pcm_substream *substream)
1700 struct snd_pcm_runtime *runtime = substream->runtime;
1703 snd_pcm_stream_lock_irq(substream);
1704 switch (runtime->status->state) {
1705 case SNDRV_PCM_STATE_XRUN:
1706 result = 0; /* already there */
1708 case SNDRV_PCM_STATE_RUNNING:
1709 __snd_pcm_xrun(substream);
1715 snd_pcm_stream_unlock_irq(substream);
1722 static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state)
1724 struct snd_pcm_runtime *runtime = substream->runtime;
1725 switch (runtime->status->state) {
1726 case SNDRV_PCM_STATE_RUNNING:
1727 case SNDRV_PCM_STATE_PREPARED:
1728 case SNDRV_PCM_STATE_PAUSED:
1729 case SNDRV_PCM_STATE_SUSPENDED:
1736 static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
1738 struct snd_pcm_runtime *runtime = substream->runtime;
1739 int err = snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
1742 runtime->hw_ptr_base = 0;
1743 runtime->hw_ptr_interrupt = runtime->status->hw_ptr -
1744 runtime->status->hw_ptr % runtime->period_size;
1745 runtime->silence_start = runtime->status->hw_ptr;
1746 runtime->silence_filled = 0;
1750 static void snd_pcm_post_reset(struct snd_pcm_substream *substream, int state)
1752 struct snd_pcm_runtime *runtime = substream->runtime;
1753 runtime->control->appl_ptr = runtime->status->hw_ptr;
1754 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1755 runtime->silence_size > 0)
1756 snd_pcm_playback_silence(substream, ULONG_MAX);
1759 static const struct action_ops snd_pcm_action_reset = {
1760 .pre_action = snd_pcm_pre_reset,
1761 .do_action = snd_pcm_do_reset,
1762 .post_action = snd_pcm_post_reset
1765 static int snd_pcm_reset(struct snd_pcm_substream *substream)
1767 return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream, 0);
1773 /* we use the second argument for updating f_flags */
1774 static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
1777 struct snd_pcm_runtime *runtime = substream->runtime;
1778 if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1779 runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
1781 if (snd_pcm_running(substream))
1783 substream->f_flags = f_flags;
1787 static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state)
1790 snd_pcm_sync_stop(substream);
1791 err = substream->ops->prepare(substream);
1794 return snd_pcm_do_reset(substream, 0);
1797 static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state)
1799 struct snd_pcm_runtime *runtime = substream->runtime;
1800 runtime->control->appl_ptr = runtime->status->hw_ptr;
1801 snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED);
1804 static const struct action_ops snd_pcm_action_prepare = {
1805 .pre_action = snd_pcm_pre_prepare,
1806 .do_action = snd_pcm_do_prepare,
1807 .post_action = snd_pcm_post_prepare
1811 * snd_pcm_prepare - prepare the PCM substream to be triggerable
1812 * @substream: the PCM substream instance
1813 * @file: file to refer f_flags
1815 * Return: Zero if successful, or a negative error code.
1817 static int snd_pcm_prepare(struct snd_pcm_substream *substream,
1823 f_flags = file->f_flags;
1825 f_flags = substream->f_flags;
1827 snd_pcm_stream_lock_irq(substream);
1828 switch (substream->runtime->status->state) {
1829 case SNDRV_PCM_STATE_PAUSED:
1830 snd_pcm_pause(substream, 0);
1832 case SNDRV_PCM_STATE_SUSPENDED:
1833 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1836 snd_pcm_stream_unlock_irq(substream);
1838 return snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
1839 substream, f_flags);
1846 static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state)
1848 struct snd_pcm_runtime *runtime = substream->runtime;
1849 switch (runtime->status->state) {
1850 case SNDRV_PCM_STATE_OPEN:
1851 case SNDRV_PCM_STATE_DISCONNECTED:
1852 case SNDRV_PCM_STATE_SUSPENDED:
1855 runtime->trigger_master = substream;
1859 static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
1861 struct snd_pcm_runtime *runtime = substream->runtime;
1862 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1863 switch (runtime->status->state) {
1864 case SNDRV_PCM_STATE_PREPARED:
1865 /* start playback stream if possible */
1866 if (! snd_pcm_playback_empty(substream)) {
1867 snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
1868 snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
1870 runtime->status->state = SNDRV_PCM_STATE_SETUP;
1873 case SNDRV_PCM_STATE_RUNNING:
1874 runtime->status->state = SNDRV_PCM_STATE_DRAINING;
1876 case SNDRV_PCM_STATE_XRUN:
1877 runtime->status->state = SNDRV_PCM_STATE_SETUP;
1883 /* stop running stream */
1884 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) {
1885 int new_state = snd_pcm_capture_avail(runtime) > 0 ?
1886 SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP;
1887 snd_pcm_do_stop(substream, new_state);
1888 snd_pcm_post_stop(substream, new_state);
1892 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING &&
1893 runtime->trigger_master == substream &&
1894 (runtime->hw.info & SNDRV_PCM_INFO_DRAIN_TRIGGER))
1895 return substream->ops->trigger(substream,
1896 SNDRV_PCM_TRIGGER_DRAIN);
1901 static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream, int state)
1905 static const struct action_ops snd_pcm_action_drain_init = {
1906 .pre_action = snd_pcm_pre_drain_init,
1907 .do_action = snd_pcm_do_drain_init,
1908 .post_action = snd_pcm_post_drain_init
1912 * Drain the stream(s).
1913 * When the substream is linked, sync until the draining of all playback streams is finished.
1915 * After this call, all streams are supposed to be in either the SETUP or the
1916 * DRAINING (capture only) state.
1918 static int snd_pcm_drain(struct snd_pcm_substream *substream,
1921 struct snd_card *card;
1922 struct snd_pcm_runtime *runtime;
1923 struct snd_pcm_substream *s;
1924 struct snd_pcm_group *group;
1925 wait_queue_entry_t wait;
1929 card = substream->pcm->card;
1930 runtime = substream->runtime;
1932 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
1936 if (file->f_flags & O_NONBLOCK)
1938 } else if (substream->f_flags & O_NONBLOCK)
1941 snd_pcm_stream_lock_irq(substream);
1943 if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
1944 snd_pcm_pause(substream, 0);
1946 /* pre-start/stop - all running streams are changed to DRAINING state */
1947 result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0);
1950 /* in non-blocking mode, we don't wait in the ioctl but let the caller poll */
1958 struct snd_pcm_runtime *to_check;
1959 if (signal_pending(current)) {
1960 result = -ERESTARTSYS;
1963 /* find a substream to drain */
1965 group = snd_pcm_stream_group_ref(substream);
1966 snd_pcm_group_for_each_entry(s, substream) {
1967 if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
1969 runtime = s->runtime;
1970 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
1975 snd_pcm_group_unref(group, substream);
1977 break; /* all drained */
1978 init_waitqueue_entry(&wait, current);
1979 set_current_state(TASK_INTERRUPTIBLE);
1980 add_wait_queue(&to_check->sleep, &wait);
1981 snd_pcm_stream_unlock_irq(substream);
1982 if (runtime->no_period_wakeup)
1983 tout = MAX_SCHEDULE_TIMEOUT;
1986 if (runtime->rate) {
1987 long t = runtime->period_size * 2 / runtime->rate;
1988 tout = max(t, tout);
1990 tout = msecs_to_jiffies(tout * 1000);
1992 tout = schedule_timeout(tout);
1994 snd_pcm_stream_lock_irq(substream);
1995 group = snd_pcm_stream_group_ref(substream);
1996 snd_pcm_group_for_each_entry(s, substream) {
1997 if (s->runtime == to_check) {
1998 remove_wait_queue(&to_check->sleep, &wait);
2002 snd_pcm_group_unref(group, substream);
2004 if (card->shutdown) {
2009 if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
2012 dev_dbg(substream->pcm->card->dev,
2013 "playback drain error (DMA or IRQ trouble?)\n");
2014 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2022 snd_pcm_stream_unlock_irq(substream);
2030 * Immediately put all linked substreams into SETUP state.
2032 static int snd_pcm_drop(struct snd_pcm_substream *substream)
2034 struct snd_pcm_runtime *runtime;
2037 if (PCM_RUNTIME_CHECK(substream))
2039 runtime = substream->runtime;
2041 if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
2042 runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
2045 snd_pcm_stream_lock_irq(substream);
2047 if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
2048 snd_pcm_pause(substream, 0);
2050 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2051 /* runtime->control->appl_ptr = runtime->status->hw_ptr; */
2052 snd_pcm_stream_unlock_irq(substream);
2058 static bool is_pcm_file(struct file *file)
2060 struct inode *inode = file_inode(file);
2061 struct snd_pcm *pcm;
2064 if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major)
2066 minor = iminor(inode);
2067 pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
2069 pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE);
2072 snd_card_unref(pcm->card);
2079 static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
2082 struct snd_pcm_file *pcm_file;
2083 struct snd_pcm_substream *substream1;
2084 struct snd_pcm_group *group, *target_group;
2085 bool nonatomic = substream->pcm->nonatomic;
2086 struct fd f = fdget(fd);
2090 if (!is_pcm_file(f.file)) {
2094 pcm_file = f.file->private_data;
2095 substream1 = pcm_file->substream;
2096 group = kzalloc(sizeof(*group), GFP_KERNEL);
2101 snd_pcm_group_init(group);
2103 down_write(&snd_pcm_link_rwsem);
2104 if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
2105 substream->runtime->status->state != substream1->runtime->status->state ||
2106 substream->pcm->nonatomic != substream1->pcm->nonatomic) {
2110 if (snd_pcm_stream_linked(substream1)) {
2115 snd_pcm_stream_lock_irq(substream);
2116 if (!snd_pcm_stream_linked(substream)) {
2117 snd_pcm_group_assign(substream, group);
2118 group = NULL; /* assigned, don't free this one below */
2120 target_group = substream->group;
2121 snd_pcm_stream_unlock_irq(substream);
2123 snd_pcm_group_lock_irq(target_group, nonatomic);
2124 snd_pcm_stream_lock(substream1);
2125 snd_pcm_group_assign(substream1, target_group);
2126 refcount_inc(&target_group->refs);
2127 snd_pcm_stream_unlock(substream1);
2128 snd_pcm_group_unlock_irq(target_group, nonatomic);
2130 up_write(&snd_pcm_link_rwsem);
2138 static void relink_to_local(struct snd_pcm_substream *substream)
2140 snd_pcm_stream_lock(substream);
2141 snd_pcm_group_assign(substream, &substream->self_group);
2142 snd_pcm_stream_unlock(substream);
2145 static int snd_pcm_unlink(struct snd_pcm_substream *substream)
2147 struct snd_pcm_group *group;
2148 bool nonatomic = substream->pcm->nonatomic;
2149 bool do_free = false;
2152 down_write(&snd_pcm_link_rwsem);
2154 if (!snd_pcm_stream_linked(substream)) {
2159 group = substream->group;
2160 snd_pcm_group_lock_irq(group, nonatomic);
2162 relink_to_local(substream);
2163 refcount_dec(&group->refs);
2165 /* detach the last stream, too */
2166 if (list_is_singular(&group->substreams)) {
2167 relink_to_local(list_first_entry(&group->substreams,
2168 struct snd_pcm_substream,
2170 do_free = refcount_dec_and_test(&group->refs);
2173 snd_pcm_group_unlock_irq(group, nonatomic);
2178 up_write(&snd_pcm_link_rwsem);
2185 static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params,
2186 struct snd_pcm_hw_rule *rule)
2188 struct snd_interval t;
2189 snd_interval_mul(hw_param_interval_c(params, rule->deps[0]),
2190 hw_param_interval_c(params, rule->deps[1]), &t);
2191 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2194 static int snd_pcm_hw_rule_div(struct snd_pcm_hw_params *params,
2195 struct snd_pcm_hw_rule *rule)
2197 struct snd_interval t;
2198 snd_interval_div(hw_param_interval_c(params, rule->deps[0]),
2199 hw_param_interval_c(params, rule->deps[1]), &t);
2200 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2203 static int snd_pcm_hw_rule_muldivk(struct snd_pcm_hw_params *params,
2204 struct snd_pcm_hw_rule *rule)
2206 struct snd_interval t;
2207 snd_interval_muldivk(hw_param_interval_c(params, rule->deps[0]),
2208 hw_param_interval_c(params, rule->deps[1]),
2209 (unsigned long) rule->private, &t);
2210 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2213 static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params,
2214 struct snd_pcm_hw_rule *rule)
2216 struct snd_interval t;
2217 snd_interval_mulkdiv(hw_param_interval_c(params, rule->deps[0]),
2218 (unsigned long) rule->private,
2219 hw_param_interval_c(params, rule->deps[1]), &t);
2220 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
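/*
 * Restrict the FORMAT mask to the formats whose physical sample width
 * falls within the current SAMPLE_BITS interval.
 */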
2223 static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params,
2224 struct snd_pcm_hw_rule *rule)
2227 const struct snd_interval *i =
2228 hw_param_interval_c(params, rule->deps[0]);
2230 struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
2232 for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
2234 if (! snd_mask_test(mask, k))
2236 bits = snd_pcm_format_physical_width(k);
2238 continue; /* ignore invalid formats */
2239 if ((unsigned)bits < i->min || (unsigned)bits > i->max)
2240 snd_mask_reset(&m, k);
2242 return snd_mask_refine(mask, &m);
2245 static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params,
2246 struct snd_pcm_hw_rule *rule)
2248 struct snd_interval t;
2254 for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
2256 if (! snd_mask_test(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
2258 bits = snd_pcm_format_physical_width(k);
2260 continue; /* ignore invalid formats */
2261 if (t.min > (unsigned)bits)
2263 if (t.max < (unsigned)bits)
2267 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
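/*
 * Table of the well-known sample rates; the order must correspond to the
 * SNDRV_PCM_RATE_* bits, as the check below verifies.
 */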
2270 #if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12
2271 #error "Change this table"
2274 static const unsigned int rates[] = {
2275 5512, 8000, 11025, 16000, 22050, 32000, 44100,
2276 48000, 64000, 88200, 96000, 176400, 192000, 352800, 384000
2279 const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
2280 .count = ARRAY_SIZE(rates),
2284 static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params,
2285 struct snd_pcm_hw_rule *rule)
2287 struct snd_pcm_hardware *hw = rule->private;
2288 return snd_interval_list(hw_param_interval(params, rule->var),
2289 snd_pcm_known_rates.count,
2290 snd_pcm_known_rates.list, hw->rates);
2293 static int snd_pcm_hw_rule_buffer_bytes_max(struct snd_pcm_hw_params *params,
2294 struct snd_pcm_hw_rule *rule)
2296 struct snd_interval t;
2297 struct snd_pcm_substream *substream = rule->private;
2299 t.max = substream->buffer_bytes_max;
2303 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
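/*
 * Initialize the hw_params constraints of the runtime: reset all masks
 * and intervals to "any", mark the integer-valued parameters, and
 * register the generic dependency rules between the parameters
 * (format <-> sample bits, frame bits, period/buffer sizes and times).
 */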
2306 int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream)
2308 struct snd_pcm_runtime *runtime = substream->runtime;
2309 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
2312 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
2313 snd_mask_any(constrs_mask(constrs, k));
2316 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
2317 snd_interval_any(constrs_interval(constrs, k));
2320 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_CHANNELS));
2321 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_SIZE));
2322 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_BYTES));
2323 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_SAMPLE_BITS));
2324 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_FRAME_BITS));
2326 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
2327 snd_pcm_hw_rule_format, NULL,
2328 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2331 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2332 snd_pcm_hw_rule_sample_bits, NULL,
2333 SNDRV_PCM_HW_PARAM_FORMAT,
2334 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2337 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2338 snd_pcm_hw_rule_div, NULL,
2339 SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2342 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2343 snd_pcm_hw_rule_mul, NULL,
2344 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2347 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2348 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2349 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2352 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2353 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2354 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
2357 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
2358 snd_pcm_hw_rule_div, NULL,
2359 SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2362 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2363 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2364 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_TIME, -1);
2367 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2368 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2369 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_BUFFER_TIME, -1);
2372 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
2373 snd_pcm_hw_rule_div, NULL,
2374 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2377 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2378 snd_pcm_hw_rule_div, NULL,
2379 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2382 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2383 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2384 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2387 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2388 snd_pcm_hw_rule_muldivk, (void*) 1000000,
2389 SNDRV_PCM_HW_PARAM_PERIOD_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2392 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2393 snd_pcm_hw_rule_mul, NULL,
2394 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2397 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2398 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2399 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2402 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2403 snd_pcm_hw_rule_muldivk, (void*) 1000000,
2404 SNDRV_PCM_HW_PARAM_BUFFER_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2407 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2408 snd_pcm_hw_rule_muldivk, (void*) 8,
2409 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2412 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2413 snd_pcm_hw_rule_muldivk, (void*) 8,
2414 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2417 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
2418 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2419 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2422 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
2423 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2424 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
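/*
 * Apply the hardware-specific constraints from runtime->hw on top of the
 * generic rules: access types, formats, channels, rates, and the period
 * and buffer sizes.
 */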
2430 int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
2432 struct snd_pcm_runtime *runtime = substream->runtime;
2433 struct snd_pcm_hardware *hw = &runtime->hw;
2435 unsigned int mask = 0;
2437 if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2438 mask |= 1 << SNDRV_PCM_ACCESS_RW_INTERLEAVED;
2439 if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2440 mask |= 1 << SNDRV_PCM_ACCESS_RW_NONINTERLEAVED;
2441 if (hw_support_mmap(substream)) {
2442 if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2443 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_INTERLEAVED;
2444 if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2445 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED;
2446 if (hw->info & SNDRV_PCM_INFO_COMPLEX)
2447 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_COMPLEX;
2449 err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask);
2453 err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT, hw->formats);
2457 err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT, 1 << SNDRV_PCM_SUBFORMAT_STD);
2461 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS,
2462 hw->channels_min, hw->channels_max);
2466 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
2467 hw->rate_min, hw->rate_max);
2471 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2472 hw->period_bytes_min, hw->period_bytes_max);
2476 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
2477 hw->periods_min, hw->periods_max);
2481 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2482 hw->period_bytes_min, hw->buffer_bytes_max);
2486 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2487 snd_pcm_hw_rule_buffer_bytes_max, substream,
2488 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, -1);
2493 if (runtime->dma_bytes) {
2494 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes);
2499 if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) {
2500 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2501 snd_pcm_hw_rule_rate, hw,
2502 SNDRV_PCM_HW_PARAM_RATE, -1);
2507 /* FIXME: this belongs to the lowlevel driver */
2508 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
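/*
 * Default pcm_release callback, installed for the first opener of a
 * substream: unlink the stream from its group if it's still linked.
 */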
2513 static void pcm_release_private(struct snd_pcm_substream *substream)
2515 if (snd_pcm_stream_linked(substream))
2516 snd_pcm_unlink(substream);
2519 void snd_pcm_release_substream(struct snd_pcm_substream *substream)
2521 substream->ref_count--;
2522 if (substream->ref_count > 0)
2525 snd_pcm_drop(substream);
2526 if (substream->hw_opened) {
2527 if (substream->ops->hw_free &&
2528 substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
2529 substream->ops->hw_free(substream);
2530 substream->ops->close(substream);
2531 substream->hw_opened = 0;
2533 if (pm_qos_request_active(&substream->latency_pm_qos_req))
2534 pm_qos_remove_request(&substream->latency_pm_qos_req);
2535 if (substream->pcm_release) {
2536 substream->pcm_release(substream);
2537 substream->pcm_release = NULL;
2539 snd_pcm_detach_substream(substream);
2541 EXPORT_SYMBOL(snd_pcm_release_substream);
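/*
 * Open a substream of the given stream direction: attach a free
 * substream, initialize the hw constraints, call the driver's open
 * callback, and complete the constraints from the reported hardware
 * capabilities.  On error the substream is released again.
 */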
2543 int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
2545 struct snd_pcm_substream **rsubstream)
2547 struct snd_pcm_substream *substream;
2550 err = snd_pcm_attach_substream(pcm, stream, file, &substream);
2553 if (substream->ref_count > 1) {
2554 *rsubstream = substream;
2558 err = snd_pcm_hw_constraints_init(substream);
2560 pcm_dbg(pcm, "snd_pcm_hw_constraints_init failed\n");
2564 if ((err = substream->ops->open(substream)) < 0)
2567 substream->hw_opened = 1;
2569 err = snd_pcm_hw_constraints_complete(substream);
2571 pcm_dbg(pcm, "snd_pcm_hw_constraints_complete failed\n");
2575 *rsubstream = substream;
2579 snd_pcm_release_substream(substream);
2582 EXPORT_SYMBOL(snd_pcm_open_substream);
2584 static int snd_pcm_open_file(struct file *file,
2585 struct snd_pcm *pcm,
2588 struct snd_pcm_file *pcm_file;
2589 struct snd_pcm_substream *substream;
2592 err = snd_pcm_open_substream(pcm, stream, file, &substream);
2596 pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL);
2597 if (pcm_file == NULL) {
2598 snd_pcm_release_substream(substream);
2601 pcm_file->substream = substream;
2602 if (substream->ref_count == 1)
2603 substream->pcm_release = pcm_release_private;
2604 file->private_data = pcm_file;
2609 static int snd_pcm_playback_open(struct inode *inode, struct file *file)
2611 struct snd_pcm *pcm;
2612 int err = nonseekable_open(inode, file);
2615 pcm = snd_lookup_minor_data(iminor(inode),
2616 SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
2617 err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
2619 snd_card_unref(pcm->card);
2623 static int snd_pcm_capture_open(struct inode *inode, struct file *file)
2625 struct snd_pcm *pcm;
2626 int err = nonseekable_open(inode, file);
2629 pcm = snd_lookup_minor_data(iminor(inode),
2630 SNDRV_DEVICE_TYPE_PCM_CAPTURE);
2631 err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
2633 snd_card_unref(pcm->card);
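/*
 * Common open code for both playback and capture devices; when no free
 * substream is available (-EAGAIN), block on the open_wait queue until
 * one is released, unless the file was opened with O_NONBLOCK.
 */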
2637 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
2640 wait_queue_entry_t wait;
2646 err = snd_card_file_add(pcm->card, file);
2649 if (!try_module_get(pcm->card->module)) {
2653 init_waitqueue_entry(&wait, current);
2654 add_wait_queue(&pcm->open_wait, &wait);
2655 mutex_lock(&pcm->open_mutex);
2657 err = snd_pcm_open_file(file, pcm, stream);
2660 if (err == -EAGAIN) {
2661 if (file->f_flags & O_NONBLOCK) {
2667 set_current_state(TASK_INTERRUPTIBLE);
2668 mutex_unlock(&pcm->open_mutex);
2670 mutex_lock(&pcm->open_mutex);
2671 if (pcm->card->shutdown) {
2675 if (signal_pending(current)) {
2680 remove_wait_queue(&pcm->open_wait, &wait);
2681 mutex_unlock(&pcm->open_mutex);
2687 module_put(pcm->card->module);
2689 snd_card_file_remove(pcm->card, file);
2694 static int snd_pcm_release(struct inode *inode, struct file *file)
2696 struct snd_pcm *pcm;
2697 struct snd_pcm_substream *substream;
2698 struct snd_pcm_file *pcm_file;
2700 pcm_file = file->private_data;
2701 substream = pcm_file->substream;
2702 if (snd_BUG_ON(!substream))
2704 pcm = substream->pcm;
2705 mutex_lock(&pcm->open_mutex);
2706 snd_pcm_release_substream(substream);
2708 mutex_unlock(&pcm->open_mutex);
2709 wake_up(&pcm->open_wait);
2710 module_put(pcm->card->module);
2711 snd_card_file_remove(pcm->card, file);
2715 /* check and update the PCM state; return 0 or a negative error
2716  * call this with the PCM stream lock held
2718 static int do_pcm_hwsync(struct snd_pcm_substream *substream)
2720 switch (substream->runtime->status->state) {
2721 case SNDRV_PCM_STATE_DRAINING:
2722 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
2725 case SNDRV_PCM_STATE_RUNNING:
2726 return snd_pcm_update_hw_ptr(substream);
2727 case SNDRV_PCM_STATE_PREPARED:
2728 case SNDRV_PCM_STATE_PAUSED:
2730 case SNDRV_PCM_STATE_SUSPENDED:
2732 case SNDRV_PCM_STATE_XRUN:
2739 /* increase the appl_ptr; returns the processed frames or a negative error */
2740 static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
2741 snd_pcm_uframes_t frames,
2742 snd_pcm_sframes_t avail)
2744 struct snd_pcm_runtime *runtime = substream->runtime;
2745 snd_pcm_sframes_t appl_ptr;
2750 if (frames > (snd_pcm_uframes_t)avail)
2752 appl_ptr = runtime->control->appl_ptr + frames;
2753 if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
2754 appl_ptr -= runtime->boundary;
2755 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2756 return ret < 0 ? ret : frames;
2759 /* decrease the appl_ptr; returns the processed frames or zero for error */
2760 static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
2761 snd_pcm_uframes_t frames,
2762 snd_pcm_sframes_t avail)
2764 struct snd_pcm_runtime *runtime = substream->runtime;
2765 snd_pcm_sframes_t appl_ptr;
2770 if (frames > (snd_pcm_uframes_t)avail)
2772 appl_ptr = runtime->control->appl_ptr - frames;
2774 appl_ptr += runtime->boundary;
2775 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2776 /* NOTE: we return zero for errors because PulseAudio gets depressed
2777  * upon receiving an error from the rewind ioctl and stops processing
2778  * entirely. Returning zero means that no rewind was done, so it's
2779  * not absolutely wrong to answer like that.
2781 return ret < 0 ? 0 : frames;
2784 static snd_pcm_sframes_t snd_pcm_rewind(struct snd_pcm_substream *substream,
2785 snd_pcm_uframes_t frames)
2787 snd_pcm_sframes_t ret;
2792 snd_pcm_stream_lock_irq(substream);
2793 ret = do_pcm_hwsync(substream);
2795 ret = rewind_appl_ptr(substream, frames,
2796 snd_pcm_hw_avail(substream));
2797 snd_pcm_stream_unlock_irq(substream);
2801 static snd_pcm_sframes_t snd_pcm_forward(struct snd_pcm_substream *substream,
2802 snd_pcm_uframes_t frames)
2804 snd_pcm_sframes_t ret;
2809 snd_pcm_stream_lock_irq(substream);
2810 ret = do_pcm_hwsync(substream);
2812 ret = forward_appl_ptr(substream, frames,
2813 snd_pcm_avail(substream));
2814 snd_pcm_stream_unlock_irq(substream);
2818 static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
2822 snd_pcm_stream_lock_irq(substream);
2823 err = do_pcm_hwsync(substream);
2824 snd_pcm_stream_unlock_irq(substream);
2828 static int snd_pcm_delay(struct snd_pcm_substream *substream,
2829 snd_pcm_sframes_t *delay)
2832 snd_pcm_sframes_t n = 0;
2834 snd_pcm_stream_lock_irq(substream);
2835 err = do_pcm_hwsync(substream);
2837 n = snd_pcm_calc_delay(substream);
2838 snd_pcm_stream_unlock_irq(substream);
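/*
 * SNDRV_PCM_IOCTL_SYNC_PTR handler: optionally hwsync, apply appl_ptr
 * and avail_min from user space (unless the corresponding flag is set),
 * then copy the current mmap status/control records back to the caller.
 */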
2844 static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
2845 struct snd_pcm_sync_ptr __user *_sync_ptr)
2847 struct snd_pcm_runtime *runtime = substream->runtime;
2848 struct snd_pcm_sync_ptr sync_ptr;
2849 volatile struct snd_pcm_mmap_status *status;
2850 volatile struct snd_pcm_mmap_control *control;
2853 memset(&sync_ptr, 0, sizeof(sync_ptr));
2854 if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
2856 if (copy_from_user(&sync_ptr.c.control, &(_sync_ptr->c.control), sizeof(struct snd_pcm_mmap_control)))
2858 status = runtime->status;
2859 control = runtime->control;
2860 if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
2861 err = snd_pcm_hwsync(substream);
2865 snd_pcm_stream_lock_irq(substream);
2866 if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) {
2867 err = pcm_lib_apply_appl_ptr(substream,
2868 sync_ptr.c.control.appl_ptr);
2870 snd_pcm_stream_unlock_irq(substream);
2874 sync_ptr.c.control.appl_ptr = control->appl_ptr;
2876 if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
2877 control->avail_min = sync_ptr.c.control.avail_min;
2879 sync_ptr.c.control.avail_min = control->avail_min;
2880 sync_ptr.s.status.state = status->state;
2881 sync_ptr.s.status.hw_ptr = status->hw_ptr;
2882 sync_ptr.s.status.tstamp = status->tstamp;
2883 sync_ptr.s.status.suspended_state = status->suspended_state;
2884 sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
2885 snd_pcm_stream_unlock_irq(substream);
2886 if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
2891 static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg)
2893 struct snd_pcm_runtime *runtime = substream->runtime;
2896 if (get_user(arg, _arg))
2898 if (arg < 0 || arg > SNDRV_PCM_TSTAMP_TYPE_LAST)
2900 runtime->tstamp_type = arg;
2904 static int snd_pcm_xferi_frames_ioctl(struct snd_pcm_substream *substream,
2905 struct snd_xferi __user *_xferi)
2907 struct snd_xferi xferi;
2908 struct snd_pcm_runtime *runtime = substream->runtime;
2909 snd_pcm_sframes_t result;
2911 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2913 if (put_user(0, &_xferi->result))
2915 if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
2917 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2918 result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames);
2920 result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames);
2921 __put_user(result, &_xferi->result);
2922 return result < 0 ? result : 0;
2925 static int snd_pcm_xfern_frames_ioctl(struct snd_pcm_substream *substream,
2926 struct snd_xfern __user *_xfern)
2928 struct snd_xfern xfern;
2929 struct snd_pcm_runtime *runtime = substream->runtime;
2931 snd_pcm_sframes_t result;
2933 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2935 if (runtime->channels > 128)
2937 if (put_user(0, &_xfern->result))
2939 if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
2942 bufs = memdup_user(xfern.bufs, sizeof(void *) * runtime->channels);
2944 return PTR_ERR(bufs);
2945 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2946 result = snd_pcm_lib_writev(substream, bufs, xfern.frames);
2948 result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
2950 __put_user(result, &_xfern->result);
2951 return result < 0 ? result : 0;
2954 static int snd_pcm_rewind_ioctl(struct snd_pcm_substream *substream,
2955 snd_pcm_uframes_t __user *_frames)
2957 snd_pcm_uframes_t frames;
2958 snd_pcm_sframes_t result;
2960 if (get_user(frames, _frames))
2962 if (put_user(0, _frames))
2964 result = snd_pcm_rewind(substream, frames);
2965 __put_user(result, _frames);
2966 return result < 0 ? result : 0;
2969 static int snd_pcm_forward_ioctl(struct snd_pcm_substream *substream,
2970 snd_pcm_uframes_t __user *_frames)
2972 snd_pcm_uframes_t frames;
2973 snd_pcm_sframes_t result;
2975 if (get_user(frames, _frames))
2977 if (put_user(0, _frames))
2979 result = snd_pcm_forward(substream, frames);
2980 __put_user(result, _frames);
2981 return result < 0 ? result : 0;
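/* the ioctl handler shared by both playback and capture streams */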
2984 static int snd_pcm_common_ioctl(struct file *file,
2985 struct snd_pcm_substream *substream,
2986 unsigned int cmd, void __user *arg)
2988 struct snd_pcm_file *pcm_file = file->private_data;
2991 if (PCM_RUNTIME_CHECK(substream))
2994 res = snd_power_wait(substream->pcm->card, SNDRV_CTL_POWER_D0);
2999 case SNDRV_PCM_IOCTL_PVERSION:
3000 return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? -EFAULT : 0;
3001 case SNDRV_PCM_IOCTL_INFO:
3002 return snd_pcm_info_user(substream, arg);
3003 case SNDRV_PCM_IOCTL_TSTAMP: /* just for compatibility */
3005 case SNDRV_PCM_IOCTL_TTSTAMP:
3006 return snd_pcm_tstamp(substream, arg);
3007 case SNDRV_PCM_IOCTL_USER_PVERSION:
3008 if (get_user(pcm_file->user_pversion,
3009 (unsigned int __user *)arg))
3012 case SNDRV_PCM_IOCTL_HW_REFINE:
3013 return snd_pcm_hw_refine_user(substream, arg);
3014 case SNDRV_PCM_IOCTL_HW_PARAMS:
3015 return snd_pcm_hw_params_user(substream, arg);
3016 case SNDRV_PCM_IOCTL_HW_FREE:
3017 return snd_pcm_hw_free(substream);
3018 case SNDRV_PCM_IOCTL_SW_PARAMS:
3019 return snd_pcm_sw_params_user(substream, arg);
3020 case SNDRV_PCM_IOCTL_STATUS32:
3021 return snd_pcm_status_user32(substream, arg, false);
3022 case SNDRV_PCM_IOCTL_STATUS_EXT32:
3023 return snd_pcm_status_user32(substream, arg, true);
3024 case SNDRV_PCM_IOCTL_STATUS64:
3025 return snd_pcm_status_user64(substream, arg, false);
3026 case SNDRV_PCM_IOCTL_STATUS_EXT64:
3027 return snd_pcm_status_user64(substream, arg, true);
3028 case SNDRV_PCM_IOCTL_CHANNEL_INFO:
3029 return snd_pcm_channel_info_user(substream, arg);
3030 case SNDRV_PCM_IOCTL_PREPARE:
3031 return snd_pcm_prepare(substream, file);
3032 case SNDRV_PCM_IOCTL_RESET:
3033 return snd_pcm_reset(substream);
3034 case SNDRV_PCM_IOCTL_START:
3035 return snd_pcm_start_lock_irq(substream);
3036 case SNDRV_PCM_IOCTL_LINK:
3037 return snd_pcm_link(substream, (int)(unsigned long) arg);
3038 case SNDRV_PCM_IOCTL_UNLINK:
3039 return snd_pcm_unlink(substream);
3040 case SNDRV_PCM_IOCTL_RESUME:
3041 return snd_pcm_resume(substream);
3042 case SNDRV_PCM_IOCTL_XRUN:
3043 return snd_pcm_xrun(substream);
3044 case SNDRV_PCM_IOCTL_HWSYNC:
3045 return snd_pcm_hwsync(substream);
3046 case SNDRV_PCM_IOCTL_DELAY:
3048 snd_pcm_sframes_t delay;
3049 snd_pcm_sframes_t __user *res = arg;
3052 err = snd_pcm_delay(substream, &delay);
3055 if (put_user(delay, res))
3059 case SNDRV_PCM_IOCTL_SYNC_PTR:
3060 return snd_pcm_sync_ptr(substream, arg);
3061 #ifdef CONFIG_SND_SUPPORT_OLD_API
3062 case SNDRV_PCM_IOCTL_HW_REFINE_OLD:
3063 return snd_pcm_hw_refine_old_user(substream, arg);
3064 case SNDRV_PCM_IOCTL_HW_PARAMS_OLD:
3065 return snd_pcm_hw_params_old_user(substream, arg);
3067 case SNDRV_PCM_IOCTL_DRAIN:
3068 return snd_pcm_drain(substream, file);
3069 case SNDRV_PCM_IOCTL_DROP:
3070 return snd_pcm_drop(substream);
3071 case SNDRV_PCM_IOCTL_PAUSE:
3072 return snd_pcm_action_lock_irq(&snd_pcm_action_pause,
3074 (int)(unsigned long)arg);
3075 case SNDRV_PCM_IOCTL_WRITEI_FRAMES:
3076 case SNDRV_PCM_IOCTL_READI_FRAMES:
3077 return snd_pcm_xferi_frames_ioctl(substream, arg);
3078 case SNDRV_PCM_IOCTL_WRITEN_FRAMES:
3079 case SNDRV_PCM_IOCTL_READN_FRAMES:
3080 return snd_pcm_xfern_frames_ioctl(substream, arg);
3081 case SNDRV_PCM_IOCTL_REWIND:
3082 return snd_pcm_rewind_ioctl(substream, arg);
3083 case SNDRV_PCM_IOCTL_FORWARD:
3084 return snd_pcm_forward_ioctl(substream, arg);
3086 pcm_dbg(substream->pcm, "unknown ioctl = 0x%x\n", cmd);
3090 static long snd_pcm_ioctl(struct file *file, unsigned int cmd,
3093 struct snd_pcm_file *pcm_file;
3095 pcm_file = file->private_data;
3097 if (((cmd >> 8) & 0xff) != 'A')
3100 return snd_pcm_common_ioctl(file, pcm_file->substream, cmd,
3101 (void __user *)arg);
3105 * snd_pcm_kernel_ioctl - Execute PCM ioctl in the kernel-space
3106 * @substream: PCM substream
3108 * @arg: IOCTL argument
3110 * The function is provided primarily for the OSS layer and USB gadget drivers,
3111 * and it allows only the limited set of ioctls (hw_params, sw_params,
3112 * prepare, start, drain, drop, forward).
3114 int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
3115 unsigned int cmd, void *arg)
3117 snd_pcm_uframes_t *frames = arg;
3118 snd_pcm_sframes_t result;
3121 case SNDRV_PCM_IOCTL_FORWARD:
3123 /* provided only for OSS; capture-only and no value returned */
3124 if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
3126 result = snd_pcm_forward(substream, *frames);
3127 return result < 0 ? result : 0;
3129 case SNDRV_PCM_IOCTL_HW_PARAMS:
3130 return snd_pcm_hw_params(substream, arg);
3131 case SNDRV_PCM_IOCTL_SW_PARAMS:
3132 return snd_pcm_sw_params(substream, arg);
3133 case SNDRV_PCM_IOCTL_PREPARE:
3134 return snd_pcm_prepare(substream, NULL);
3135 case SNDRV_PCM_IOCTL_START:
3136 return snd_pcm_start_lock_irq(substream);
3137 case SNDRV_PCM_IOCTL_DRAIN:
3138 return snd_pcm_drain(substream, NULL);
3139 case SNDRV_PCM_IOCTL_DROP:
3140 return snd_pcm_drop(substream);
3141 case SNDRV_PCM_IOCTL_DELAY:
3142 return snd_pcm_delay(substream, frames);
3147 EXPORT_SYMBOL(snd_pcm_kernel_ioctl);
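/*
 * read/write file operations for interleaved access; the byte count must
 * be frame-aligned and is converted to frames for the PCM core.
 */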
3149 static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count,
3152 struct snd_pcm_file *pcm_file;
3153 struct snd_pcm_substream *substream;
3154 struct snd_pcm_runtime *runtime;
3155 snd_pcm_sframes_t result;
3157 pcm_file = file->private_data;
3158 substream = pcm_file->substream;
3159 if (PCM_RUNTIME_CHECK(substream))
3161 runtime = substream->runtime;
3162 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3164 if (!frame_aligned(runtime, count))
3166 count = bytes_to_frames(runtime, count);
3167 result = snd_pcm_lib_read(substream, buf, count);
3169 result = frames_to_bytes(runtime, result);
3173 static ssize_t snd_pcm_write(struct file *file, const char __user *buf,
3174 size_t count, loff_t * offset)
3176 struct snd_pcm_file *pcm_file;
3177 struct snd_pcm_substream *substream;
3178 struct snd_pcm_runtime *runtime;
3179 snd_pcm_sframes_t result;
3181 pcm_file = file->private_data;
3182 substream = pcm_file->substream;
3183 if (PCM_RUNTIME_CHECK(substream))
3185 runtime = substream->runtime;
3186 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3188 if (!frame_aligned(runtime, count))
3190 count = bytes_to_frames(runtime, count);
3191 result = snd_pcm_lib_write(substream, buf, count);
3193 result = frames_to_bytes(runtime, result);
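/*
 * Vectored read/write for non-interleaved access: one iovec entry per
 * channel; the number of frames is derived from the first vector's length.
 */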
3197 static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
3199 struct snd_pcm_file *pcm_file;
3200 struct snd_pcm_substream *substream;
3201 struct snd_pcm_runtime *runtime;
3202 snd_pcm_sframes_t result;
3205 snd_pcm_uframes_t frames;
3207 pcm_file = iocb->ki_filp->private_data;
3208 substream = pcm_file->substream;
3209 if (PCM_RUNTIME_CHECK(substream))
3211 runtime = substream->runtime;
3212 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3214 if (!iter_is_iovec(to))
3216 if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
3218 if (!frame_aligned(runtime, to->iov->iov_len))
3220 frames = bytes_to_samples(runtime, to->iov->iov_len);
3221 bufs = kmalloc_array(to->nr_segs, sizeof(void *), GFP_KERNEL);
3224 for (i = 0; i < to->nr_segs; ++i)
3225 bufs[i] = to->iov[i].iov_base;
3226 result = snd_pcm_lib_readv(substream, bufs, frames);
3228 result = frames_to_bytes(runtime, result);
3233 static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
3235 struct snd_pcm_file *pcm_file;
3236 struct snd_pcm_substream *substream;
3237 struct snd_pcm_runtime *runtime;
3238 snd_pcm_sframes_t result;
3241 snd_pcm_uframes_t frames;
3243 pcm_file = iocb->ki_filp->private_data;
3244 substream = pcm_file->substream;
3245 if (PCM_RUNTIME_CHECK(substream))
3247 runtime = substream->runtime;
3248 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3250 if (!iter_is_iovec(from))
3252 if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
3253 !frame_aligned(runtime, from->iov->iov_len))
3255 frames = bytes_to_samples(runtime, from->iov->iov_len);
3256 bufs = kmalloc_array(from->nr_segs, sizeof(void *), GFP_KERNEL);
3259 for (i = 0; i < from->nr_segs; ++i)
3260 bufs[i] = from->iov[i].iov_base;
3261 result = snd_pcm_lib_writev(substream, bufs, frames);
3263 result = frames_to_bytes(runtime, result);
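/*
 * Poll handler: the stream is reported readable/writable once at least
 * avail_min frames are available; stopped or xrun states signal EPOLLERR.
 */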
3268 static __poll_t snd_pcm_poll(struct file *file, poll_table *wait)
3270 struct snd_pcm_file *pcm_file;
3271 struct snd_pcm_substream *substream;
3272 struct snd_pcm_runtime *runtime;
3274 snd_pcm_uframes_t avail;
3276 pcm_file = file->private_data;
3278 substream = pcm_file->substream;
3279 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3280 ok = EPOLLOUT | EPOLLWRNORM;
3282 ok = EPOLLIN | EPOLLRDNORM;
3283 if (PCM_RUNTIME_CHECK(substream))
3284 return ok | EPOLLERR;
3286 runtime = substream->runtime;
3287 poll_wait(file, &runtime->sleep, wait);
3290 snd_pcm_stream_lock_irq(substream);
3291 avail = snd_pcm_avail(substream);
3292 switch (runtime->status->state) {
3293 case SNDRV_PCM_STATE_RUNNING:
3294 case SNDRV_PCM_STATE_PREPARED:
3295 case SNDRV_PCM_STATE_PAUSED:
3296 if (avail >= runtime->control->avail_min)
3299 case SNDRV_PCM_STATE_DRAINING:
3300 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
3307 mask = ok | EPOLLERR;
3310 snd_pcm_stream_unlock_irq(substream);
3319 * Only on coherent architectures can we mmap the status and the control records
3320 * for efficient data transfer. On others, we have to use the HWSYNC ioctl...
3322 #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA)
3324 * mmap status record
3326 static vm_fault_t snd_pcm_mmap_status_fault(struct vm_fault *vmf)
3328 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3329 struct snd_pcm_runtime *runtime;
3331 if (substream == NULL)
3332 return VM_FAULT_SIGBUS;
3333 runtime = substream->runtime;
3334 vmf->page = virt_to_page(runtime->status);
3335 get_page(vmf->page);
3339 static const struct vm_operations_struct snd_pcm_vm_ops_status =
3341 .fault = snd_pcm_mmap_status_fault,
3344 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3345 struct vm_area_struct *area)
3348 if (!(area->vm_flags & VM_READ))
3350 size = area->vm_end - area->vm_start;
3351 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)))
3353 area->vm_ops = &snd_pcm_vm_ops_status;
3354 area->vm_private_data = substream;
3355 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3360 * mmap control record
3362 static vm_fault_t snd_pcm_mmap_control_fault(struct vm_fault *vmf)
3364 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3365 struct snd_pcm_runtime *runtime;
3367 if (substream == NULL)
3368 return VM_FAULT_SIGBUS;
3369 runtime = substream->runtime;
3370 vmf->page = virt_to_page(runtime->control);
3371 get_page(vmf->page);
3375 static const struct vm_operations_struct snd_pcm_vm_ops_control =
3377 .fault = snd_pcm_mmap_control_fault,
3380 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3381 struct vm_area_struct *area)
3384 if (!(area->vm_flags & VM_READ))
3386 size = area->vm_end - area->vm_start;
3387 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)))
3389 area->vm_ops = &snd_pcm_vm_ops_control;
3390 area->vm_private_data = substream;
3391 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3395 static bool pcm_status_mmap_allowed(struct snd_pcm_file *pcm_file)
3397 if (pcm_file->no_compat_mmap)
3399 /* See pcm_control_mmap_allowed() below.
3400 * Since older alsa-lib requires both status and control mmaps to be
3401 * coupled, we have to disable the status mmap for old alsa-lib, too.
3403 if (pcm_file->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 14) &&
3404 (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR))
3409 static bool pcm_control_mmap_allowed(struct snd_pcm_file *pcm_file)
3411 if (pcm_file->no_compat_mmap)
3413 /* Disallow the control mmap when SYNC_APPLPTR flag is set;
3414 * it enforces the user-space to fall back to snd_pcm_sync_ptr(),
3415 * thus it effectively assures the manual update of appl_ptr.
3417 if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR)
3422 #else /* ! coherent mmap */
3424 * don't support mmap for status and control records.
3426 #define pcm_status_mmap_allowed(pcm_file) false
3427 #define pcm_control_mmap_allowed(pcm_file) false
3429 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3430 struct vm_area_struct *area)
3434 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3435 struct vm_area_struct *area)
3439 #endif /* coherent mmap */
3441 static inline struct page *
3442 snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
3444 void *vaddr = substream->runtime->dma_area + ofs;
3446 switch (substream->dma_buffer.dev.type) {
3447 #ifdef CONFIG_SND_DMA_SGBUF
3448 case SNDRV_DMA_TYPE_DEV_SG:
3449 case SNDRV_DMA_TYPE_DEV_UC_SG:
3450 return snd_pcm_sgbuf_ops_page(substream, ofs);
3451 #endif /* CONFIG_SND_DMA_SGBUF */
3452 case SNDRV_DMA_TYPE_VMALLOC:
3453 return vmalloc_to_page(vaddr);
3455 return virt_to_page(vaddr);
3460 * fault callback for mmapping a RAM page
3462 static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
3464 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3465 struct snd_pcm_runtime *runtime;
3466 unsigned long offset;
3470 if (substream == NULL)
3471 return VM_FAULT_SIGBUS;
3472 runtime = substream->runtime;
3473 offset = vmf->pgoff << PAGE_SHIFT;
3474 dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3475 if (offset > dma_bytes - PAGE_SIZE)
3476 return VM_FAULT_SIGBUS;
3477 if (substream->ops->page)
3478 page = substream->ops->page(substream, offset);
3480 page = snd_pcm_default_page_ops(substream, offset);
3482 return VM_FAULT_SIGBUS;
3488 static const struct vm_operations_struct snd_pcm_vm_ops_data = {
3489 .open = snd_pcm_mmap_data_open,
3490 .close = snd_pcm_mmap_data_close,
3493 static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
3494 .open = snd_pcm_mmap_data_open,
3495 .close = snd_pcm_mmap_data_close,
3496 .fault = snd_pcm_mmap_data_fault,
3500 * mmap the DMA buffer on RAM
3504 * snd_pcm_lib_default_mmap - Default PCM data mmap function
3505 * @substream: PCM substream
3508 * This is the default mmap handler for PCM data. When mmap pcm_ops is NULL,
3509 * this function is invoked implicitly.
3511 int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
3512 struct vm_area_struct *area)
3514 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3515 #ifdef CONFIG_GENERIC_ALLOCATOR
3516 if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) {
3517 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
3518 return remap_pfn_range(area, area->vm_start,
3519 substream->dma_buffer.addr >> PAGE_SHIFT,
3520 area->vm_end - area->vm_start, area->vm_page_prot);
3522 #endif /* CONFIG_GENERIC_ALLOCATOR */
3523 #ifndef CONFIG_X86 /* for avoiding warnings arch/x86/mm/pat.c */
3524 if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
3525 (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV ||
3526 substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_UC))
3527 return dma_mmap_coherent(substream->dma_buffer.dev.dev,
3529 substream->runtime->dma_area,
3530 substream->runtime->dma_addr,
3531 substream->runtime->dma_bytes);
3532 #endif /* CONFIG_X86 */
3533 /* mmap with fault handler */
3534 area->vm_ops = &snd_pcm_vm_ops_data_fault;
3537 EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
3540 * mmap the DMA buffer on I/O memory area
3542 #if SNDRV_PCM_INFO_MMAP_IOMEM
3544 * snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem
3545 * @substream: PCM substream
3548 * When your hardware uses the iomapped pages as the hardware buffer and
3549 * wants to mmap it, pass this function as mmap pcm_ops. Note that this
3550 * is supposed to work only on limited architectures.
3552 int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
3553 struct vm_area_struct *area)
3555 struct snd_pcm_runtime *runtime = substream->runtime;
3557 area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
3558 return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
3560 EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
3561 #endif /* SNDRV_PCM_INFO_MMAP_IOMEM */
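/*
 * mmap the DMA data buffer: validate the access rights and the requested
 * range against the (page-aligned) DMA buffer size, then let either the
 * driver's mmap callback or the default handler map it.
 */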
3566 int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file,
3567 struct vm_area_struct *area)
3569 struct snd_pcm_runtime *runtime;
3571 unsigned long offset;
3575 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
3576 if (!(area->vm_flags & (VM_WRITE|VM_READ)))
3579 if (!(area->vm_flags & VM_READ))
3582 runtime = substream->runtime;
3583 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3585 if (!(runtime->info & SNDRV_PCM_INFO_MMAP))
3587 if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
3588 runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
3590 size = area->vm_end - area->vm_start;
3591 offset = area->vm_pgoff << PAGE_SHIFT;
3592 dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3593 if ((size_t)size > dma_bytes)
3595 if (offset > dma_bytes - size)
3598 area->vm_ops = &snd_pcm_vm_ops_data;
3599 area->vm_private_data = substream;
3600 if (substream->ops->mmap)
3601 err = substream->ops->mmap(substream, area);
3603 err = snd_pcm_lib_default_mmap(substream, area);
3605 atomic_inc(&substream->mmap_count);
3608 EXPORT_SYMBOL(snd_pcm_mmap_data);
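/*
 * mmap dispatcher: the page offset selects between the status record,
 * the control record, and the data buffer.
 */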
3610 static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area)
3612 struct snd_pcm_file * pcm_file;
3613 struct snd_pcm_substream *substream;
3614 unsigned long offset;
3616 pcm_file = file->private_data;
3617 substream = pcm_file->substream;
3618 if (PCM_RUNTIME_CHECK(substream))
3621 offset = area->vm_pgoff << PAGE_SHIFT;
3623 case SNDRV_PCM_MMAP_OFFSET_STATUS:
3624 if (!pcm_status_mmap_allowed(pcm_file))
3626 return snd_pcm_mmap_status(substream, file, area);
3627 case SNDRV_PCM_MMAP_OFFSET_CONTROL:
3628 if (!pcm_control_mmap_allowed(pcm_file))
3630 return snd_pcm_mmap_control(substream, file, area);
3632 return snd_pcm_mmap_data(substream, file, area);
3637 static int snd_pcm_fasync(int fd, struct file * file, int on)
3639 struct snd_pcm_file * pcm_file;
3640 struct snd_pcm_substream *substream;
3641 struct snd_pcm_runtime *runtime;
3643 pcm_file = file->private_data;
3644 substream = pcm_file->substream;
3645 if (PCM_RUNTIME_CHECK(substream))
3647 runtime = substream->runtime;
3648 return fasync_helper(fd, file, on, &runtime->fasync);
3654 #ifdef CONFIG_COMPAT
3655 #include "pcm_compat.c"
3657 #define snd_pcm_ioctl_compat NULL
3661 * Helpers to be removed eventually; kept only for binary compatibility
3664 #ifdef CONFIG_SND_SUPPORT_OLD_API
3665 #define __OLD_TO_NEW_MASK(x) ((x&7)|((x&0x07fffff8)<<5))
3666 #define __NEW_TO_OLD_MASK(x) ((x&7)|((x&0xffffff00)>>5))
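/*
 * Converters between the old and the current hw_params layouts; the old
 * ABI used plain bitmasks, and the rmask/cmask bit positions are remapped
 * via the __OLD_TO_NEW_MASK/__NEW_TO_OLD_MASK macros above.
 */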
3668 static void snd_pcm_hw_convert_from_old_params(struct snd_pcm_hw_params *params,
3669 struct snd_pcm_hw_params_old *oparams)
3673 memset(params, 0, sizeof(*params));
3674 params->flags = oparams->flags;
3675 for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
3676 params->masks[i].bits[0] = oparams->masks[i];
3677 memcpy(params->intervals, oparams->intervals, sizeof(oparams->intervals));
3678 params->rmask = __OLD_TO_NEW_MASK(oparams->rmask);
3679 params->cmask = __OLD_TO_NEW_MASK(oparams->cmask);
3680 params->info = oparams->info;
3681 params->msbits = oparams->msbits;
3682 params->rate_num = oparams->rate_num;
3683 params->rate_den = oparams->rate_den;
3684 params->fifo_size = oparams->fifo_size;
3687 static void snd_pcm_hw_convert_to_old_params(struct snd_pcm_hw_params_old *oparams,
3688 struct snd_pcm_hw_params *params)
3692 memset(oparams, 0, sizeof(*oparams));
3693 oparams->flags = params->flags;
3694 for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
3695 oparams->masks[i] = params->masks[i].bits[0];
3696 memcpy(oparams->intervals, params->intervals, sizeof(oparams->intervals));
3697 oparams->rmask = __NEW_TO_OLD_MASK(params->rmask);
3698 oparams->cmask = __NEW_TO_OLD_MASK(params->cmask);
3699 oparams->info = params->info;
3700 oparams->msbits = params->msbits;
3701 oparams->rate_num = params->rate_num;
3702 oparams->rate_den = params->rate_den;
3703 oparams->fifo_size = params->fifo_size;
3706 static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
3707 struct snd_pcm_hw_params_old __user * _oparams)
3709 struct snd_pcm_hw_params *params;
3710 struct snd_pcm_hw_params_old *oparams = NULL;
3713 params = kmalloc(sizeof(*params), GFP_KERNEL);
3717 oparams = memdup_user(_oparams, sizeof(*oparams));
3718 if (IS_ERR(oparams)) {
3719 err = PTR_ERR(oparams);
3722 snd_pcm_hw_convert_from_old_params(params, oparams);
3723 err = snd_pcm_hw_refine(substream, params);
3727 err = fixup_unreferenced_params(substream, params);
3731 snd_pcm_hw_convert_to_old_params(oparams, params);
3732 if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
3741 static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
3742 struct snd_pcm_hw_params_old __user * _oparams)
3744 struct snd_pcm_hw_params *params;
3745 struct snd_pcm_hw_params_old *oparams = NULL;
3748 params = kmalloc(sizeof(*params), GFP_KERNEL);
3752 oparams = memdup_user(_oparams, sizeof(*oparams));
3753 if (IS_ERR(oparams)) {
3754 err = PTR_ERR(oparams);
3758 snd_pcm_hw_convert_from_old_params(params, oparams);
3759 err = snd_pcm_hw_params(substream, params);
3763 snd_pcm_hw_convert_to_old_params(oparams, params);
3764 if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
3772 #endif /* CONFIG_SND_SUPPORT_OLD_API */
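/*
 * get_unmapped_area handler for the nommu case: return the kernel
 * address of the status/control record or the DMA buffer directly.
 */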
3775 static unsigned long snd_pcm_get_unmapped_area(struct file *file,
3778 unsigned long pgoff,
3779 unsigned long flags)
3781 struct snd_pcm_file *pcm_file = file->private_data;
3782 struct snd_pcm_substream *substream = pcm_file->substream;
3783 struct snd_pcm_runtime *runtime = substream->runtime;
3784 unsigned long offset = pgoff << PAGE_SHIFT;
3787 case SNDRV_PCM_MMAP_OFFSET_STATUS:
3788 return (unsigned long)runtime->status;
3789 case SNDRV_PCM_MMAP_OFFSET_CONTROL:
3790 return (unsigned long)runtime->control;
3792 return (unsigned long)runtime->dma_area + offset;
3796 # define snd_pcm_get_unmapped_area NULL
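/* the file operations: entry 0 for playback, entry 1 for capture */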
3803 const struct file_operations snd_pcm_f_ops[2] = {
3805 .owner = THIS_MODULE,
3806 .write = snd_pcm_write,
3807 .write_iter = snd_pcm_writev,
3808 .open = snd_pcm_playback_open,
3809 .release = snd_pcm_release,
3810 .llseek = no_llseek,
3811 .poll = snd_pcm_poll,
3812 .unlocked_ioctl = snd_pcm_ioctl,
3813 .compat_ioctl = snd_pcm_ioctl_compat,
3814 .mmap = snd_pcm_mmap,
3815 .fasync = snd_pcm_fasync,
3816 .get_unmapped_area = snd_pcm_get_unmapped_area,
3819 .owner = THIS_MODULE,
3820 .read = snd_pcm_read,
3821 .read_iter = snd_pcm_readv,
3822 .open = snd_pcm_capture_open,
3823 .release = snd_pcm_release,
3824 .llseek = no_llseek,
3825 .poll = snd_pcm_poll,
3826 .unlocked_ioctl = snd_pcm_ioctl,
3827 .compat_ioctl = snd_pcm_ioctl_compat,
3828 .mmap = snd_pcm_mmap,
3829 .fasync = snd_pcm_fasync,
3830 .get_unmapped_area = snd_pcm_get_unmapped_area,