usr/src/linux-headers-5.15.0-133/arch/parisc/include/asm/dma.h

/* SPDX-License-Identifier: GPL-2.0 */
/* asm/dma.h: Defines for using and allocating dma channels.
 * Written by Hennus Bergman, 1992.
 * High DMA channel support & info by Hannu Savolainen
 * and John Boyd, Nov. 1992.
 * (c) Copyright 2000, Grant Grundler
 */

#ifndef _ASM_DMA_H
#define _ASM_DMA_H

#include <asm/io.h>		/* need byte IO */

#define dma_outb	outb
#define dma_inb		inb

/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMAs into manageable chunks.
** On parisc, this is more of the software/tuning constraint
** rather than the HW. I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
*/
#define DMA_CHUNK_SIZE	(BITS_PER_LONG*PAGE_SIZE)

/* The maximum address that we can perform a DMA transfer to on this platform
** New dynamic DMA interfaces should obsolete this....
*/
#define MAX_DMA_ADDRESS	(~0UL)

/*
** We don't have DMA channels... well V-class does but the
** Dynamic DMA Mapping interface will support them... right? :^)
** Note: this is not relevant right now for PA-RISC, but we cannot
** leave this as undefined because some things (e.g. sound)
** won't compile :-(
*/
#define MAX_DMA_CHANNELS 8
#define DMA_MODE_READ	0x44	/* I/O to memory, no autoinit, increment, single mode */
#define DMA_MODE_WRITE	0x48	/* memory to I/O, no autoinit, increment, single mode */
#define DMA_MODE_CASCADE 0xC0	/* pass thru DREQ->HRQ, DACK<-HLDA only */

#define DMA_AUTOINIT	0x10

/* 8237 DMA controllers */
#define IO_DMA1_BASE	0x00	/* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE	0xC0	/* 16 bit master DMA, ch 4(=slave input)..7 */

/* DMA controller registers */
#define DMA1_CMD_REG		0x08	/* command register (w) */
#define DMA1_STAT_REG		0x08	/* status register (r) */
#define DMA1_REQ_REG		0x09	/* request register (w) */
#define DMA1_MASK_REG		0x0A	/* single-channel mask (w) */
#define DMA1_MODE_REG		0x0B	/* mode register (w) */
#define DMA1_CLEAR_FF_REG	0x0C	/* clear pointer flip-flop (w) */
#define DMA1_TEMP_REG		0x0D	/* Temporary Register (r) */
#define DMA1_RESET_REG		0x0D	/* Master Clear (w) */
#define DMA1_CLR_MASK_REG	0x0E	/* Clear Mask */
#define DMA1_MASK_ALL_REG	0x0F	/* all-channels mask (w) */
#define DMA1_EXT_MODE_REG	(0x400 | DMA1_MODE_REG)

#define DMA2_CMD_REG		0xD0	/* command register (w) */
#define DMA2_STAT_REG		0xD0	/* status register (r) */
#define DMA2_REQ_REG		0xD2	/* request register (w) */
#define DMA2_MASK_REG		0xD4	/* single-channel mask (w) */
#define DMA2_MODE_REG		0xD6	/* mode register (w) */
#define DMA2_CLEAR_FF_REG	0xD8	/* clear pointer flip-flop (w) */
#define DMA2_TEMP_REG		0xDA	/* Temporary Register (r) */
#define DMA2_RESET_REG		0xDA	/* Master Clear (w) */
#define DMA2_CLR_MASK_REG	0xDC	/* Clear Mask */
#define DMA2_MASK_ALL_REG	0xDE	/* all-channels mask (w) */
#define DMA2_EXT_MODE_REG	(0x400 | DMA2_MODE_REG)

static __inline__ unsigned long claim_dma_lock(void)
{
	return 0;
}

static __inline__ void release_dma_lock(unsigned long flags)
{
}

/* Get DMA residue count. After a DMA transfer, this
 * should return zero. Reading this while a DMA transfer is
 * still in progress will return unpredictable results.
 * If called before the channel has been used, it may return 1.
 * Otherwise, it returns the number of _bytes_ left to transfer.
 *
 * Assumes DMA flip-flop is clear.
 */
static __inline__ int get_dma_residue(unsigned int dmanr)
{
	unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
					 : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;

	/* using short to get 16-bit wrap around */
	unsigned short count;

	count = 1 + dma_inb(io_port);
	count += dma_inb(io_port) << 8;

	return (dmanr<=3)? count : (count<<1);
}

/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
#ifdef CONFIG_SUPERIO
	if (dmanr<=3)
		dma_outb(dmanr, DMA1_MASK_REG);
	else
		dma_outb(dmanr & 3, DMA2_MASK_REG);
#endif
}

static __inline__ void disable_dma(unsigned int dmanr)
{
#ifdef CONFIG_SUPERIO
	if (dmanr<=3)
		dma_outb(dmanr | 4, DMA1_MASK_REG);
	else
		dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
#endif
}

/* reserve a DMA channel */
#define request_dma(dmanr, device_id)	(0)

/* Clear the 'DMA Pointer Flip Flop'.
 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 * Use this once to initialize the FF to a known state.
 * After that, keep track of it. :-)
 * --- In order to do that, the DMA routines below should ---
 * --- only be used while holding the DMA lock ! ---
 */
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
}

/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
}

/* Set only the page register bits of the transfer address.
 * This is used for successive transfers when we know the contents of
 * the lower 16 bits of the DMA current address register, but a 64k boundary
 * may have been crossed.
 */
static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
{
}

/* Set transfer address & page bits for specific DMA channel.
 * Assumes dma flipflop is clear.
 */
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
}

/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
 * a specific DMA channel.
 * You must ensure the parameters are valid.
 * NOTE: from a manual: "the number of transfers is one more
 * than the initial word count"! This is taken into account.
 * Assumes dma flip-flop is clear.
 * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
 */
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
}

#define free_dma(dmanr)

#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy	(0)
#endif

#endif /* _ASM_DMA_H */
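
The wrap-around arithmetic in get_dma_residue() above is easy to misread: the 8237 count register holds the remaining transfer count minus one, so the code adds 1 and relies on the unsigned short to wrap 0x10000 back to 0 once the register reads 0xFFFF (terminal count). A small standalone sketch of just that arithmetic, using made-up register bytes instead of real port reads:

#include <stdio.h>

/* Mimics the arithmetic of get_dma_residue() above for hypothetical low/high
 * bytes read from an 8237 count register (word_channel selects the <<1). */
static unsigned int residue(unsigned char low, unsigned char high, int word_channel)
{
    unsigned short count;               /* short: the 16-bit wrap is intentional */

    count = 1 + low;
    count += high << 8;

    return word_channel ? (unsigned int)count << 1 : count;
}

int main(void)
{
    /* Register reads 0xFFFF: terminal count reached, so the residue is 0. */
    printf("%u\n", residue(0xff, 0xff, 0));     /* prints 0  */

    /* Register reads 0x000f on a word channel: 16 words, i.e. 32 bytes left. */
    printf("%u\n", residue(0x0f, 0x00, 1));     /* prints 32 */

    return 0;
}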

usr/src/linux-headers-5.15.0-142/arch/sh/include/cpu-sh4/cpu/dma.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_CPU_SH4_DMA_H
#define __ASM_CPU_SH4_DMA_H

#include <linux/sh_intc.h>

/*
 * SH7750/SH7751/SH7760
 */
#define DMTE0_IRQ	evt2irq(0x640)
#define DMTE4_IRQ	evt2irq(0x780)
#define DMTE6_IRQ	evt2irq(0x7c0)
#define DMAE0_IRQ	evt2irq(0x6c0)

#define SH_DMAC_BASE0	0xffa00000
#define SH_DMAC_BASE1	0xffa00070

#endif /* __ASM_CPU_SH4_DMA_H */

usr/src/linux-headers-5.15.0-133/arch/arm/include/asm/dma.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_DMA_H
#define __ASM_ARM_DMA_H

/*
 * This is the maximum virtual address which can be DMA'd from.
 */
#ifndef CONFIG_ZONE_DMA
#define MAX_DMA_ADDRESS	0xffffffffUL
#else
#define MAX_DMA_ADDRESS	({ \
	extern phys_addr_t arm_dma_zone_size; \
	arm_dma_zone_size && arm_dma_zone_size < (0x100000000ULL - PAGE_OFFSET) ? \
		(PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
#endif

#ifdef CONFIG_ISA_DMA_API
/*
 * This is used to support drivers written for the x86 ISA DMA API.
 * It should not be re-used except for that purpose.
 */
#include <linux/spinlock.h>
#include <linux/scatterlist.h>

#include <mach/isa-dma.h>

/*
 * The DMA modes reflect the settings for the ISA DMA controller
 */
#define DMA_MODE_MASK	 0xcc

#define DMA_MODE_READ	 0x44
#define DMA_MODE_WRITE	 0x48
#define DMA_MODE_CASCADE 0xc0
#define DMA_AUTOINIT	 0x10

extern raw_spinlock_t dma_spin_lock;

static inline unsigned long claim_dma_lock(void)
{
	unsigned long flags;
	raw_spin_lock_irqsave(&dma_spin_lock, flags);
	return flags;
}

static inline void release_dma_lock(unsigned long flags)
{
	raw_spin_unlock_irqrestore(&dma_spin_lock, flags);
}

/* Clear the 'DMA Pointer Flip Flop'.
 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 */
#define clear_dma_ff(chan)

/* Set only the page register bits of the transfer address.
 *
 * NOTE: This is an architecture specific function, and should
 * be hidden from the drivers
 */
extern void set_dma_page(unsigned int chan, char pagenr);

/* Request a DMA channel
 *
 * Some architectures may need to do allocate an interrupt
 */
extern int request_dma(unsigned int chan, const char * device_id);

/* Free a DMA channel
 *
 * Some architectures may need to do free an interrupt
 */
extern void free_dma(unsigned int chan);

/* Enable DMA for this channel
 *
 * On some architectures, this may have other side effects like
 * enabling an interrupt and setting the DMA registers.
 */
extern void enable_dma(unsigned int chan);

/* Disable DMA for this channel
 *
 * On some architectures, this may have other side effects like
 * disabling an interrupt or whatever.
 */
extern void disable_dma(unsigned int chan);

/* Test whether the specified channel has an active DMA transfer
 */
extern int dma_channel_active(unsigned int chan);

/* Set the DMA scatter gather list for this channel
 *
 * This should not be called if a DMA channel is enabled,
 * especially since some DMA architectures don't update the
 * DMA address immediately, but defer it to the enable_dma().
 */
extern void set_dma_sg(unsigned int chan, struct scatterlist *sg, int nr_sg);

/* Set the DMA address for this channel
 *
 * This should not be called if a DMA channel is enabled,
 * especially since some DMA architectures don't update the
 * DMA address immediately, but defer it to the enable_dma().
 */
extern void __set_dma_addr(unsigned int chan, void *addr);
#define set_dma_addr(chan, addr)	\
	__set_dma_addr(chan, (void *)__bus_to_virt(addr))

/* Set the DMA byte count for this channel
 *
 * This should not be called if a DMA channel is enabled,
 * especially since some DMA architectures don't update the
 * DMA count immediately, but defer it to the enable_dma().
 */
extern void set_dma_count(unsigned int chan, unsigned long count);

/* Set the transfer direction for this channel
 *
 * This should not be called if a DMA channel is enabled,
 * especially since some DMA architectures don't update the
 * DMA transfer direction immediately, but defer it to the
 * enable_dma().
 */
extern void set_dma_mode(unsigned int chan, unsigned int mode);

/* Set the transfer speed for this channel
 */
extern void set_dma_speed(unsigned int chan, int cycle_ns);

/* Get DMA residue count. After a DMA transfer, this
 * should return zero. Reading this while a DMA transfer is
 * still in progress will return unpredictable results.
 * If called before the channel has been used, it may return 1.
 * Otherwise, it returns the number of _bytes_ left to transfer.
 */
extern int get_dma_residue(unsigned int chan);

#ifndef NO_DMA
#define NO_DMA	255
#endif

#endif /* CONFIG_ISA_DMA_API */

#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy	(0)
#endif

#endif /* __ASM_ARM_DMA_H */
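
To make the call ordering concrete, here is a rough sketch of how a legacy ISA-style driver might program a transfer through the API declared above. It is only an illustration: the channel number, device name, buffer handling and error handling are placeholders, not anything this header defines.

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/dma.h>

/* Hypothetical helper: start a device-to-memory transfer on an ISA channel. */
static int foo_start_read(unsigned int chan, dma_addr_t buf, size_t len)
{
	unsigned long flags;
	int ret;

	ret = request_dma(chan, "foo");		/* reserve the channel (usually at probe) */
	if (ret)
		return ret;

	flags = claim_dma_lock();		/* serialise access to the controller */
	disable_dma(chan);			/* never reprogram a live channel */
	clear_dma_ff(chan);
	set_dma_mode(chan, DMA_MODE_READ);	/* I/O -> memory */
	set_dma_addr(chan, buf);		/* bus address of the buffer */
	set_dma_count(chan, len);		/* byte count */
	enable_dma(chan);
	release_dma_lock(flags);

	return 0;
}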

usr/src/linux-headers-5.15.0-133/arch/xtensa/include/asm/dma.h

/*
 * include/asm-xtensa/dma.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_DMA_H
#define _XTENSA_DMA_H

#include <asm/io.h>		/* need byte IO */

/*
 * This is only to be defined if we have PC-like DMA.
 * By default this is not true on an Xtensa processor,
 * however on boards with a PCI bus, such functionality
 * might be emulated externally.
 *
 * NOTE: there still exists driver code that assumes
 * this is defined, eg. drivers/sound/soundcard.c (as of 2.4).
 */
#define MAX_DMA_CHANNELS	8

/*
 * The maximum virtual address to which DMA transfers
 * can be performed on this platform.
 *
 * NOTE: This is board (platform) specific, not processor-specific!
 *
 * NOTE: This assumes DMA transfers can only be performed on
 * the section of physical memory contiguously mapped in virtual
 * space for the kernel. For the Xtensa architecture, this
 * means the maximum possible size of this DMA area is
 * the size of the statically mapped kernel segment
 * (XCHAL_KSEG_{CACHED,BYPASS}_SIZE), ie. 128 MB.
 *
 * NOTE: When the entire KSEG area is DMA capable, we subtract
 * one from the max address so that the virt_to_phys() macro
 * works correctly on the address (otherwise the address
 * enters another area, and virt_to_phys() may not return
 * the value desired).
 */
#ifndef MAX_DMA_ADDRESS
#define MAX_DMA_ADDRESS		(PAGE_OFFSET + XCHAL_KIO_SIZE - 1)
#endif

/* Reserve and release a DMA channel */
extern int request_dma(unsigned int dmanr, const char * device_id);
extern void free_dma(unsigned int dmanr);

#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy	(0)
#endif

#endif

usr/src/linux-headers-5.15.0-133/arch/microblaze/include/asm/dma.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2006 Atmark Techno, Inc.
 */

#ifndef _ASM_MICROBLAZE_DMA_H
#define _ASM_MICROBLAZE_DMA_H

/* Virtual address corresponding to last available physical memory address. */
#define MAX_DMA_ADDRESS (CONFIG_KERNEL_START + memory_size - 1)

#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy	(0)
#endif

#endif /* _ASM_MICROBLAZE_DMA_H */

usr/src/linux-headers-5.15.0-133/arch/sh/include/asm/dma.h

/* SPDX-License-Identifier: GPL-2.0
 *
 * include/asm-sh/dma.h
 *
 * Copyright (C) 2003, 2004 Paul Mundt
 */
#ifndef __ASM_SH_DMA_H
#define __ASM_SH_DMA_H

#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <asm-generic/dma.h>

/*
 * Read and write modes can mean drastically different things depending on the
 * channel configuration. Consult your DMAC documentation and module
 * implementation for further clues.
 */
#define DMA_MODE_READ		0x00
#define DMA_MODE_WRITE		0x01
#define DMA_MODE_MASK		0x01

#define DMA_AUTOINIT		0x10

/*
 * DMAC (dma_info) flags
 */
enum {
	DMAC_CHANNELS_CONFIGURED	= 0x01,
	DMAC_CHANNELS_TEI_CAPABLE	= 0x02,	/* Transfer end interrupt */
};

/*
 * DMA channel capabilities / flags
 */
enum {
	DMA_CONFIGURED		= 0x01,

	/*
	 * Transfer end interrupt, inherited from DMAC.
	 * wait_queue used in dma_wait_for_completion.
	 */
	DMA_TEI_CAPABLE		= 0x02,
};

extern spinlock_t dma_spin_lock;

struct dma_channel;

struct dma_ops {
	int (*request)(struct dma_channel *chan);
	void (*free)(struct dma_channel *chan);

	int (*get_residue)(struct dma_channel *chan);
	int (*xfer)(struct dma_channel *chan);
	int (*configure)(struct dma_channel *chan, unsigned long flags);
	int (*extend)(struct dma_channel *chan, unsigned long op, void *param);
};

struct dma_channel {
	char dev_id[16];		/* unique name per DMAC of channel */

	unsigned int chan;		/* DMAC channel number */
	unsigned int vchan;		/* Virtual channel number */

	unsigned int mode;
	unsigned int count;

	unsigned long sar;
	unsigned long dar;

	const char **caps;

	unsigned long flags;
	atomic_t busy;

	wait_queue_head_t wait_queue;

	struct device dev;
	void *priv_data;
};

struct dma_info {
	struct platform_device *pdev;

	const char *name;
	unsigned int nr_channels;
	unsigned long flags;

	struct dma_ops *ops;
	struct dma_channel *channels;

	struct list_head list;
	int first_channel_nr;
	int first_vchannel_nr;
};

struct dma_chan_caps {
	int ch_num;
	const char **caplist;
};

#define to_dma_channel(channel) container_of(channel, struct dma_channel, dev)

/* arch/sh/drivers/dma/dma-api.c */
extern int dma_xfer(unsigned int chan, unsigned long from, unsigned long to, size_t size, unsigned int mode);

#define dma_write(chan, from, to, size)	\
	dma_xfer(chan, from, to, size, DMA_MODE_WRITE)
#define dma_write_page(chan, from, to)	\
	dma_write(chan, from, to, PAGE_SIZE)

#define dma_read(chan, from, to, size)	\
	dma_xfer(chan, from, to, size, DMA_MODE_READ)
#define dma_read_page(chan, from, to)	\
	dma_read(chan, from, to, PAGE_SIZE)

extern int request_dma_bycap(const char **dmac, const char **caps, const char *dev_id);
extern int get_dma_residue(unsigned int chan);
extern struct dma_info *get_dma_info(unsigned int chan);
extern struct dma_channel *get_dma_channel(unsigned int chan);
extern void dma_wait_for_completion(unsigned int chan);
extern void dma_configure_channel(unsigned int chan, unsigned long flags);

extern int register_dmac(struct dma_info *info);
extern void unregister_dmac(struct dma_info *info);
extern struct dma_info *get_dma_info_by_name(const char *dmac_name);

extern int dma_extend(unsigned int chan, unsigned long op, void *param);
extern int register_chan_caps(const char *dmac, struct dma_chan_caps *capslist);

/* arch/sh/drivers/dma/dma-sysfs.c */
extern int dma_create_sysfs_files(struct dma_channel *, struct dma_info *);
extern void dma_remove_sysfs_files(struct dma_channel *, struct dma_info *);

#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy	(0)
#endif

#endif /* __ASM_SH_DMA_H */
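
As a rough sketch of how a client of this API hangs together (not code taken from the kernel): request a channel by capability, configure it, kick a transfer through the dma_write() wrapper, then block until the transfer-end interrupt. The DMAC name, capability strings and addresses are invented for illustration.

#include <asm/dma.h>

static int foo_copy(unsigned long src, unsigned long dst, size_t len)
{
	static const char *dmac[] = { "SH DMAC", NULL };	/* hypothetical DMAC name */
	static const char *caps[] = { "mem-to-mem", NULL };	/* hypothetical capability */
	int chan;

	chan = request_dma_bycap(dmac, caps, "foo");
	if (chan < 0)
		return chan;

	dma_configure_channel(chan, 0);		/* flags are DMAC specific */
	dma_write(chan, src, dst, len);		/* dma_xfer(..., DMA_MODE_WRITE) */
	dma_wait_for_completion(chan);		/* sleeps on the channel's wait queue */

	return get_dma_residue(chan);		/* 0 once everything was transferred */
}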

usr/src/linux-headers-5.15.0-142/arch/parisc/include/asm/dma.h (identical to the 5.15.0-133 copy above)

usr/src/linux-headers-5.15.0-133/arch/arm/mach-pxa/include/mach/dma.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/mach-pxa/include/mach/dma.h
 *
 * Author:	Nicolas Pitre
 * Created:	Jun 15, 2001
 * Copyright:	MontaVista Software, Inc.
 */
#ifndef __ASM_ARCH_DMA_H
#define __ASM_ARCH_DMA_H

#include <mach/hardware.h>

/* DMA Controller Registers Definitions */
#define DMAC_REGS_VIRT	io_p2v(0x40000000)

#endif /* _ASM_ARCH_DMA_H */

usr/src/linux-headers-5.15.0-141/arch/arc/include/asm/dma.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef ASM_ARC_DMA_H
#define ASM_ARC_DMA_H

#define MAX_DMA_ADDRESS 0xC0000000

#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy	0
#endif

#endif

usr/src/linux-headers-5.15.0-142/arch/arm/include/asm/mach/dma.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/mach/dma.h
 *
 * Copyright (C) 1998-2000 Russell King
 *
 * This header file describes the interface between the generic DMA handler
 * (dma.c) and the architecture-specific DMA backends (dma-*.c)
 */

struct dma_struct;
typedef struct dma_struct dma_t;

struct dma_ops {
	int	(*request)(unsigned int, dma_t *);	/* optional */
	void	(*free)(unsigned int, dma_t *);		/* optional */
	void	(*enable)(unsigned int, dma_t *);	/* mandatory */
	void	(*disable)(unsigned int, dma_t *);	/* mandatory */
	int	(*residue)(unsigned int, dma_t *);	/* optional */
	int	(*setspeed)(unsigned int, dma_t *, int); /* optional */
	const char *type;
};

struct dma_struct {
	void		*addr;		/* single DMA address */
	unsigned long	count;		/* single DMA size */
	struct scatterlist buf;		/* single DMA */
	int		sgcount;	/* number of DMA SG */
	struct scatterlist *sg;		/* DMA Scatter-Gather List */

	unsigned int	active:1;	/* Transfer active */
	unsigned int	invalid:1;	/* Address/Count changed */

	unsigned int	dma_mode;	/* DMA mode */
	int		speed;		/* DMA speed */

	unsigned int	lock;		/* Device is allocated */
	const char	*device_id;	/* Device name */

	const struct dma_ops *d_ops;
};

/*
 * isa_dma_add - add an ISA-style DMA channel
 */
extern int isa_dma_add(unsigned int, dma_t *dma);

/*
 * Add the ISA DMA controller. Always takes channels 0-7.
 */
extern void isa_init_dma(void);
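
For orientation only, a minimal sketch of what an architecture-specific backend (one of the dma-*.c files mentioned above) might look like: fill in the mandatory dma_ops hooks, point each channel's dma_t at them, and register the channels with isa_dma_add(). The names and the channel count are made up for the example.

#include <linux/init.h>
#include <asm/mach/dma.h>

static void foo_enable_dma(unsigned int chan, dma_t *dma)
{
	/* program the controller from dma->dma_mode, dma->addr and dma->count */
}

static void foo_disable_dma(unsigned int chan, dma_t *dma)
{
	/* mask the channel */
}

static const struct dma_ops foo_dma_ops = {
	.enable		= foo_enable_dma,	/* mandatory */
	.disable	= foo_disable_dma,	/* mandatory */
	.type		= "foo",
};

static dma_t foo_dma[4];

static int __init foo_dma_init(void)
{
	unsigned int i;

	for (i = 0; i < 4; i++) {
		foo_dma[i].d_ops = &foo_dma_ops;
		isa_dma_add(i, &foo_dma[i]);
	}
	return 0;
}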

usr/src/linux-headers-5.15.0-142/arch/sh/include/cpu-sh4a/cpu/dma.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_CPU_SH4_DMA_SH7780_H
#define __ASM_SH_CPU_SH4_DMA_SH7780_H

#include <linux/sh_intc.h>

#if defined(CONFIG_CPU_SUBTYPE_SH7343) || \
	defined(CONFIG_CPU_SUBTYPE_SH7730)
#define DMTE0_IRQ	evt2irq(0x800)
#define DMTE4_IRQ	evt2irq(0xb80)
#define DMAE0_IRQ	evt2irq(0xbc0)	/* DMA Error IRQ*/
#define SH_DMAC_BASE0	0xFE008020
#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
#define DMTE0_IRQ	evt2irq(0x800)
#define DMTE4_IRQ	evt2irq(0xb80)
#define DMAE0_IRQ	evt2irq(0xbc0)	/* DMA Error IRQ*/
#define SH_DMAC_BASE0	0xFE008020
#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
#define DMTE0_IRQ	evt2irq(0x640)
#define DMTE4_IRQ	evt2irq(0x780)
#define DMAE0_IRQ	evt2irq(0x6c0)
#define SH_DMAC_BASE0	0xFF608020
#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
#define DMTE0_IRQ	evt2irq(0x800)	/* DMAC0A*/
#define DMTE4_IRQ	evt2irq(0xb80)	/* DMAC0B */
#define DMTE6_IRQ	evt2irq(0x700)
#define DMTE8_IRQ	evt2irq(0x740)	/* DMAC1A */
#define DMTE9_IRQ	evt2irq(0x760)
#define DMTE10_IRQ	evt2irq(0xb00)	/* DMAC1B */
#define DMTE11_IRQ	evt2irq(0xb20)
#define DMAE0_IRQ	evt2irq(0xbc0)	/* DMA Error IRQ*/
#define DMAE1_IRQ	evt2irq(0xb40)	/* DMA Error IRQ*/
#define SH_DMAC_BASE0	0xFE008020
#define SH_DMAC_BASE1	0xFDC08020
#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
#define DMTE0_IRQ	evt2irq(0x800)	/* DMAC0A*/
#define DMTE4_IRQ	evt2irq(0xb80)	/* DMAC0B */
#define DMTE6_IRQ	evt2irq(0x700)
#define DMTE8_IRQ	evt2irq(0x740)	/* DMAC1A */
#define DMTE9_IRQ	evt2irq(0x760)
#define DMTE10_IRQ	evt2irq(0xb00)	/* DMAC1B */
#define DMTE11_IRQ	evt2irq(0xb20)
#define DMAE0_IRQ	evt2irq(0xbc0)	/* DMA Error IRQ*/
#define DMAE1_IRQ	evt2irq(0xb40)	/* DMA Error IRQ*/
#define SH_DMAC_BASE0	0xFE008020
#define SH_DMAC_BASE1	0xFDC08020
#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
#define DMTE0_IRQ	evt2irq(0x640)
#define DMTE4_IRQ	evt2irq(0x780)
#define DMTE6_IRQ	evt2irq(0x7c0)
#define DMTE8_IRQ	evt2irq(0xd80)
#define DMTE9_IRQ	evt2irq(0xda0)
#define DMTE10_IRQ	evt2irq(0xdc0)
#define DMTE11_IRQ	evt2irq(0xde0)
#define DMAE0_IRQ	evt2irq(0x6c0)	/* DMA Error IRQ */
#define SH_DMAC_BASE0	0xFC808020
#define SH_DMAC_BASE1	0xFC818020
#else /* SH7785 */
#define DMTE0_IRQ	evt2irq(0x620)
#define DMTE4_IRQ	evt2irq(0x6a0)
#define DMTE6_IRQ	evt2irq(0x880)
#define DMTE8_IRQ	evt2irq(0x8c0)
#define DMTE9_IRQ	evt2irq(0x8e0)
#define DMTE10_IRQ	evt2irq(0x900)
#define DMTE11_IRQ	evt2irq(0x920)
#define DMAE0_IRQ	evt2irq(0x6e0)	/* DMA Error IRQ0 */
#define DMAE1_IRQ	evt2irq(0x940)	/* DMA Error IRQ1 */
#define SH_DMAC_BASE0	0xFC808020
#define SH_DMAC_BASE1	0xFCC08020
#endif

#endif /* __ASM_SH_CPU_SH4_DMA_SH7780_H */

usr/src/linux-headers-5.15.0-141/arch/mips/include/asm/dma.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/include/asm/dma.h: Defines for using and allocating dma channels.
 * Written by Hennus Bergman, 1992.
 * High DMA channel support & info by Hannu Savolainen
 * and John Boyd, Nov. 1992.
 *
 * NOTE: all this is true *only* for ISA/EISA expansions on Mips boards
 * and can only be used for expansion cards. Onboard DMA controllers, such
 * as the R4030 on Jazz boards behave totally different!
 */

#ifndef _ASM_DMA_H
#define _ASM_DMA_H

#include <asm/io.h>		/* need byte IO */
#include <linux/spinlock.h>	/* And spinlocks */
#include <linux/delay.h>

#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
#define dma_outb	outb_p
#else
#define dma_outb	outb
#endif

#define dma_inb		inb

/*
 * NOTES about DMA transfers:
 *
 *  controller 1: channels 0-3, byte operations, ports 00-1F
 *  controller 2: channels 4-7, word operations, ports C0-DF
 *
 *  - ALL registers are 8 bits only, regardless of transfer size
 *  - channel 4 is not used - cascades 1 into 2.
 *  - channels 0-3 are byte - addresses/counts are for physical bytes
 *  - channels 5-7 are word - addresses/counts are for physical words
 *  - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
 *  - transfer count loaded to registers is 1 less than actual count
 *  - controller 2 offsets are all even (2x offsets for controller 1)
 *  - page registers for 5-7 don't use data bit 0, represent 128K pages
 *  - page registers for 0-3 use bit 0, represent 64K pages
 *
 * DMA transfers are limited to the lower 16MB of _physical_ memory.
 * Note that addresses loaded into registers must be _physical_ addresses,
 * not logical addresses (which may differ if paging is active).
 *
 *  Address mapping for channels 0-3:
 *
 *   A23 ... A16 A15 ... A8  A7 ... A0    (Physical addresses)
 *    |  ...  |   |  ... |   |  ... |
 *    |  ...  |   |  ... |   |  ... |
 *    |  ...  |   |  ... |   |  ... |
 *   P7  ...  P0  A7 ... A0  A7 ... A0
 *  |    Page    | Addr MSB | Addr LSB |   (DMA registers)
 *
 *  Address mapping for channels 5-7:
 *
 *   A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0    (Physical addresses)
 *    |  ...  |   \   \   ... \  \  \  ... \  \
 *    |  ...  |    \   \   ... \  \  \  ... \  (not used)
 *    |  ...  |     \   \   ... \  \  \  ... \
 *   P7  ...  P1 (0) A7 A6  ... A0 A7 A6 ... A0
 *  |      Page     |  Addr MSB  |  Addr LSB  |   (DMA registers)
 *
 * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
 * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
 * the hardware level, so odd-byte transfers aren't possible).
 *
 * Transfer count (_not # bytes_) is limited to 64K, represented as actual
 * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
 * and up to 128K bytes may be transferred on channels 5-7 in one operation.
 *
 */

#ifndef CONFIG_GENERIC_ISA_DMA_SUPPORT_BROKEN
#define MAX_DMA_CHANNELS	8
#endif

/*
 * The maximum address in KSEG0 that we can perform a DMA transfer to on this
 * platform. This describes only the PC style part of the DMA logic like on
 * Deskstations or Acer PICA but not the much more versatile DMA logic used
 * for the local devices on Acer PICA or Magnums.
 */
#if defined(CONFIG_SGI_IP22) || defined(CONFIG_SGI_IP28)
/* don't care; ISA bus master won't work, ISA slave DMA supports 32bit addr */
#define MAX_DMA_ADDRESS		PAGE_OFFSET
#else
#define MAX_DMA_ADDRESS		(PAGE_OFFSET + 0x01000000)
#endif
#define MAX_DMA_PFN	PFN_DOWN(virt_to_phys((void *)MAX_DMA_ADDRESS))

#ifndef MAX_DMA32_PFN
#define MAX_DMA32_PFN	(1UL << (32 - PAGE_SHIFT))
#endif

/* 8237 DMA controllers */
#define IO_DMA1_BASE	0x00	/* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE	0xC0	/* 16 bit master DMA, ch 4(=slave input)..7 */

/* DMA controller registers */
#define DMA1_CMD_REG		0x08	/* command register (w) */
#define DMA1_STAT_REG		0x08	/* status register (r) */
#define DMA1_REQ_REG		0x09	/* request register (w) */
#define DMA1_MASK_REG		0x0A	/* single-channel mask (w) */
#define DMA1_MODE_REG		0x0B	/* mode register (w) */
#define DMA1_CLEAR_FF_REG	0x0C	/* clear pointer flip-flop (w) */
#define DMA1_TEMP_REG		0x0D	/* Temporary Register (r) */
#define DMA1_RESET_REG		0x0D	/* Master Clear (w) */
#define DMA1_CLR_MASK_REG	0x0E	/* Clear Mask */
#define DMA1_MASK_ALL_REG	0x0F	/* all-channels mask (w) */

#define DMA2_CMD_REG		0xD0	/* command register (w) */
#define DMA2_STAT_REG		0xD0	/* status register (r) */
#define DMA2_REQ_REG		0xD2	/* request register (w) */
#define DMA2_MASK_REG		0xD4	/* single-channel mask (w) */
#define DMA2_MODE_REG		0xD6	/* mode register (w) */
#define DMA2_CLEAR_FF_REG	0xD8	/* clear pointer flip-flop (w) */
#define DMA2_TEMP_REG		0xDA	/* Temporary Register (r) */
#define DMA2_RESET_REG		0xDA	/* Master Clear (w) */
#define DMA2_CLR_MASK_REG	0xDC	/* Clear Mask */
#define DMA2_MASK_ALL_REG	0xDE	/* all-channels mask (w) */

#define DMA_ADDR_0	0x00	/* DMA address registers */
#define DMA_ADDR_1	0x02
#define DMA_ADDR_2	0x04
#define DMA_ADDR_3	0x06
#define DMA_ADDR_4	0xC0
#define DMA_ADDR_5	0xC4
#define DMA_ADDR_6	0xC8
#define DMA_ADDR_7	0xCC

#define DMA_CNT_0	0x01	/* DMA count registers */
#define DMA_CNT_1	0x03
#define DMA_CNT_2	0x05
#define DMA_CNT_3	0x07
#define DMA_CNT_4	0xC2
#define DMA_CNT_5	0xC6
#define DMA_CNT_6	0xCA
#define DMA_CNT_7	0xCE

#define DMA_PAGE_0	0x87	/* DMA page registers */
#define DMA_PAGE_1	0x83
#define DMA_PAGE_2	0x81
#define DMA_PAGE_3	0x82
#define DMA_PAGE_5	0x8B
#define DMA_PAGE_6	0x89
#define DMA_PAGE_7	0x8A

#define DMA_MODE_READ	0x44	/* I/O to memory, no autoinit, increment, single mode */
#define DMA_MODE_WRITE	0x48	/* memory to I/O, no autoinit, increment, single mode */
#define DMA_MODE_CASCADE 0xC0	/* pass thru DREQ->HRQ, DACK<-HLDA only */

#define DMA_AUTOINIT	0x10

extern spinlock_t dma_spin_lock;

static __inline__ unsigned long claim_dma_lock(void)
{
	unsigned long flags;
	spin_lock_irqsave(&dma_spin_lock, flags);
	return flags;
}

static __inline__ void release_dma_lock(unsigned long flags)
{
	spin_unlock_irqrestore(&dma_spin_lock, flags);
}

/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
	if (dmanr<=3)
		dma_outb(dmanr, DMA1_MASK_REG);
	else
		dma_outb(dmanr & 3, DMA2_MASK_REG);
}

static __inline__ void disable_dma(unsigned int dmanr)
{
	if (dmanr<=3)
		dma_outb(dmanr | 4, DMA1_MASK_REG);
	else
		dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
}

/* Clear the 'DMA Pointer Flip Flop'.
 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 * Use this once to initialize the FF to a known state.
 * After that, keep track of it. :-)
 * --- In order to do that, the DMA routines below should ---
 * --- only be used while holding the DMA lock ! ---
 */
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
	if (dmanr<=3)
		dma_outb(0, DMA1_CLEAR_FF_REG);
	else
		dma_outb(0, DMA2_CLEAR_FF_REG);
}

/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
	if (dmanr<=3)
		dma_outb(mode | dmanr, DMA1_MODE_REG);
	else
		dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
}

/* Set only the page register bits of the transfer address.
 * This is used for successive transfers when we know the contents of
 * the lower 16 bits of the DMA current address register, but a 64k boundary
 * may have been crossed.
 */
static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
{
	switch(dmanr) {
	case 0:
		dma_outb(pagenr, DMA_PAGE_0);
		break;
	case 1:
		dma_outb(pagenr, DMA_PAGE_1);
		break;
	case 2:
		dma_outb(pagenr, DMA_PAGE_2);
		break;
	case 3:
		dma_outb(pagenr, DMA_PAGE_3);
		break;
	case 5:
		dma_outb(pagenr & 0xfe, DMA_PAGE_5);
		break;
	case 6:
		dma_outb(pagenr & 0xfe, DMA_PAGE_6);
		break;
	case 7:
		dma_outb(pagenr & 0xfe, DMA_PAGE_7);
		break;
	}
}

/* Set transfer address & page bits for specific DMA channel.
 * Assumes dma flipflop is clear.
 */
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
	set_dma_page(dmanr, a>>16);
	if (dmanr <= 3) {
		dma_outb(a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE);
		dma_outb((a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE);
	} else {
		dma_outb((a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE);
		dma_outb((a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE);
	}
}

/* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for
 * a specific DMA channel.
 * You must ensure the parameters are valid.
 * NOTE: from a manual: "the number of transfers is one more
 * than the initial word count"! This is taken into account.
 * Assumes dma flip-flop is clear.
 * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
 */
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
	count--;
	if (dmanr <= 3) {
		dma_outb(count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE);
		dma_outb((count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE);
	} else {
		dma_outb((count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE);
		dma_outb((count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE);
	}
}

/* Get DMA residue count. After a DMA transfer, this
 * should return zero. Reading this while a DMA transfer is
 * still in progress will return unpredictable results.
 * If called before the channel has been used, it may return 1.
 * Otherwise, it returns the number of _bytes_ left to transfer.
 *
 * Assumes DMA flip-flop is clear.
 */
static __inline__ int get_dma_residue(unsigned int dmanr)
{
	unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
					 : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;

	/* using short to get 16-bit wrap around */
	unsigned short count;

	count = 1 + dma_inb(io_port);
	count += dma_inb(io_port) << 8;

	return (dmanr<=3)? count : (count<<1);
}

/* These are in kernel/dma.c: */
extern int request_dma(unsigned int dmanr, const char * device_id);	/* reserve a DMA channel */
extern void free_dma(unsigned int dmanr);				/* release it again */

/* From PCI */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy	(0)
#endif

#endif /* _ASM_DMA_H */
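
The trickiest part of the MIPS header above is the address/count encoding for the 16-bit channels, so here is a worked example with made-up values showing what set_dma_addr() and set_dma_count() would write for channel 5 (a word channel). It only mirrors the shifts in those two functions and compiles as an ordinary user-space program:

#include <stdio.h>

int main(void)
{
	/* Hypothetical transfer on channel 5: physical address 0x123456, 0x2000 bytes. */
	unsigned int a = 0x123456, count = 0x2000;

	/* set_dma_addr(): the page register gets address bits 23..17 (bit 0 is not
	 * used on word channels), the address registers get the low 16 bits of the
	 * *word* address (a >> 1). */
	printf("page  = 0x%02x\n", (a >> 16) & 0xfe);	/* 0x12 -> DMA_PAGE_5 */
	printf("a lsb = 0x%02x\n", (a >> 1) & 0xff);	/* 0x2b */
	printf("a msb = 0x%02x\n", (a >> 9) & 0xff);	/* 0x1a */

	/* set_dma_count(): the controller is loaded with the word count minus one. */
	count--;
	printf("c lsb = 0x%02x\n", (count >> 1) & 0xff);	/* 0xff */
	printf("c msb = 0x%02x\n", (count >> 9) & 0xff);	/* 0x0f -> 0x0fff, i.e. 4096 word transfers */

	return 0;
}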

usr/src/linux-headers-5.15.0-142/arch/hexagon/include/asm/dma.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_DMA_H
#define _ASM_DMA_H

#include <asm/io.h>

#define MAX_DMA_CHANNELS 1
#define MAX_DMA_ADDRESS	(PAGE_OFFSET)

extern size_t hexagon_coherent_pool_size;

#endif

usr/src/linux-headers-5.15.0-141/arch/arm/include/asm/mach/dma.h (identical to the 5.15.0-142 copy above)