.unmap_page = sba_unmap_page,
.map_sg = sba_map_sg_attrs,
.unmap_sg = sba_unmap_sg_attrs,
- .sync_single_for_cpu = machvec_dma_sync_single,
- .sync_sg_for_cpu = machvec_dma_sync_sg,
- .sync_single_for_device = machvec_dma_sync_single,
- .sync_sg_for_device = machvec_dma_sync_sg,
.dma_supported = sba_dma_supported,
.mapping_error = sba_dma_mapping_error,
};
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);
-extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
- enum dma_data_direction);
-extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
return platform_dma_get_ops(NULL);
/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
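/*
 * A small standalone illustration of the timeout arithmetic above: the value
 * is expressed in ITC cycles, i.e. the per-second ITC frequency times 10.
 * The 400 MHz frequency below is an assumed figure for illustration only and
 * is not part of the patch.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t itc_freq = 400000000ULL;	/* assumed ITC cycles per second */
	uint64_t timeout = itc_freq * 10;	/* 10 seconds, in ITC cycles */

	printf("DMAR operation timeout = %llu cycles\n",
	       (unsigned long long)timeout);
	return 0;
}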
-extern void pci_iommu_shutdown(void);
extern void no_iommu_init(void);
#ifdef CONFIG_INTEL_IOMMU
extern int force_iommu, no_iommu;
#define no_iommu (1)
#define iommu_detected (0)
#endif
-extern void iommu_dma_init(void);
extern void machvec_init(const char *name);
#endif
} while (md);
return 0; /* never reached */
}
-EXPORT_SYMBOL(kern_mem_attribute);
int
valid_phys_addr_range (phys_addr_t phys_addr, unsigned long size)
{
}
EXPORT_SYMBOL(machvec_timer_interrupt);
-
-void
-machvec_dma_sync_single(struct device *hwdev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir)
-{
- mb();
-}
-EXPORT_SYMBOL(machvec_dma_sync_single);
-
-void
-machvec_dma_sync_sg(struct device *hwdev, struct scatterlist *sg, int n,
- enum dma_data_direction dir)
-{
- mb();
-}
-EXPORT_SYMBOL(machvec_dma_sync_sg);
#include <linux/kernel.h>
#include <asm/page.h>
-dma_addr_t bad_dma_address __read_mostly;
-EXPORT_SYMBOL(bad_dma_address);
-
-static int iommu_sac_force __read_mostly;
-
int no_iommu __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int force_iommu __read_mostly = 1;
int iommu_pass_through;
-extern struct dma_map_ops intel_dma_ops;
-
static int __init pci_iommu_init(void)
{
if (iommu_detected)
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
-void pci_iommu_shutdown(void)
-{
- return;
-}
-
-void __init
-iommu_dma_init(void)
-{
- return;
-}
-
-int iommu_dma_supported(struct device *dev, u64 mask)
-{
- /* Copied from i386. Doesn't make much sense, because it will
- only work for pci_alloc_coherent.
- The caller just has to use GFP_DMA in this case. */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- /* Tell the device to use SAC when IOMMU force is on. This
- allows the driver to use cheaper accesses in some cases.
-
- Problem with this is that if we overflow the IOMMU area and
- return DAC as fallback address the device may not handle it
- correctly.
-
- As a special case some controllers have a 39bit address
- mode that is as efficient as 32bit (aic79xx). Don't force
- SAC for these. Assume all masks <= 40 bits are of this
- type. Normally this doesn't make any difference, but gives
- more gentle handling of IOMMU overflow. */
- if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
- dev_info(dev, "Force SAC with mask %llx\n", mask);
- return 0;
- }
-
- return 1;
-}
-EXPORT_SYMBOL(iommu_dma_supported);
-
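/*
 * A minimal standalone sketch of the mask checks described in the removed
 * iommu_dma_supported() comment above, assuming nothing beyond what that
 * comment states. EX_DMA_BIT_MASK mirrors the kernel's DMA_BIT_MASK();
 * example_mask_supported() and main() are illustrative names only and are
 * not part of the patch.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static int example_mask_supported(uint64_t mask, int sac_force)
{
	/* Masks below 24 bits only work for coherent allocations with GFP_DMA. */
	if (mask < EX_DMA_BIT_MASK(24))
		return 0;

	/*
	 * When SAC is being forced, report wide masks (>= 40 bits) as
	 * unsupported so the driver falls back to a cheaper 32-bit mask;
	 * 39/40-bit masks are left alone (aic79xx-style controllers).
	 */
	if (sac_force && mask >= EX_DMA_BIT_MASK(40))
		return 0;

	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       example_mask_supported(EX_DMA_BIT_MASK(16), 0),	/* 0: too narrow */
	       example_mask_supported(EX_DMA_BIT_MASK(39), 1),	/* 1: 39-bit mode kept */
	       example_mask_supported(EX_DMA_BIT_MASK(64), 1));	/* 0: SAC forced */
	return 0;
}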
void __init pci_iommu_alloc(void)
{
- dma_ops = &intel_dma_ops;
-
- intel_dma_ops.sync_single_for_cpu = machvec_dma_sync_single;
- intel_dma_ops.sync_sg_for_cpu = machvec_dma_sync_sg;
- intel_dma_ops.sync_single_for_device = machvec_dma_sync_single;
- intel_dma_ops.sync_sg_for_device = machvec_dma_sync_sg;
- intel_dma_ops.dma_supported = iommu_dma_supported;
-
/*
* The order of these functions is important for
* fall-back/fail-over reasons
return nhwentries;
}
-static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir)
-{
- BUG_ON(!dev_is_pci(dev));
-}
-
-static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction dir)
-{
- BUG_ON(!dev_is_pci(dev));
-}
-
-static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
-{
- BUG_ON(!dev_is_pci(dev));
-}
-
-static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
-{
- BUG_ON(!dev_is_pci(dev));
-}
-
static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
.unmap_page = sn_dma_unmap_page,
.map_sg = sn_dma_map_sg,
.unmap_sg = sn_dma_unmap_sg,
- .sync_single_for_cpu = sn_dma_sync_single_for_cpu,
- .sync_sg_for_cpu = sn_dma_sync_sg_for_cpu,
- .sync_single_for_device = sn_dma_sync_single_for_device,
- .sync_sg_for_device = sn_dma_sync_sg_for_device,
.mapping_error = sn_dma_mapping_error,
.dma_supported = sn_dma_supported,
.get_required_mask = sn_dma_get_required_mask,
return !dma_addr;
}
-const struct dma_map_ops intel_dma_ops = {
+static const struct dma_map_ops intel_dma_ops = {
.alloc = intel_alloc_coherent,
.free = intel_free_coherent,
.map_sg = intel_map_sg,
.map_page = intel_map_page,
.unmap_page = intel_unmap_page,
.mapping_error = intel_mapping_error,
-#ifdef CONFIG_X86
.dma_supported = dma_direct_supported,
-#endif
};
static inline int iommu_domain_cache_init(void)