Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r--	include/linux/dmaengine.h	97
1 file changed, 55 insertions(+), 42 deletions(-)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 41cf0c399288..212c5b9ac106 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -22,6 +22,7 @@
 #define LINUX_DMAENGINE_H
 
 #include <linux/device.h>
+#include <linux/err.h>
 #include <linux/uio.h>
 #include <linux/bug.h>
 #include <linux/scatterlist.h>
@@ -36,7 +37,6 @@
  */
 typedef s32 dma_cookie_t;
 #define DMA_MIN_COOKIE	1
-#define DMA_MAX_COOKIE	INT_MAX
 
 static inline int dma_submit_error(dma_cookie_t cookie)
 {
@@ -256,7 +256,7 @@ struct dma_chan_percpu {
  * @dev: class device for sysfs
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
- * @client-count: how many clients are using this channel
+ * @client_count: how many clients are using this channel
  * @table_count: number of appearances in the mem-to-mem allocation table
  * @private: private data for certain client-channel associations
  */
@@ -278,10 +278,10 @@ struct dma_chan {
 
 /**
  * struct dma_chan_dev - relate sysfs device node to backing channel device
- * @chan - driver channel device
- * @device - sysfs device
- * @dev_id - parent dma_device dev_id
- * @idr_ref - reference count to gate release of dma_device dev_id
+ * @chan: driver channel device
+ * @device: sysfs device
+ * @dev_id: parent dma_device dev_id
+ * @idr_ref: reference count to gate release of dma_device dev_id
  */
 struct dma_chan_dev {
 	struct dma_chan *chan;
@@ -291,13 +291,14 @@ struct dma_chan_dev {
 };
 
 /**
- * enum dma_slave_buswidth - defines bus with of the DMA slave
+ * enum dma_slave_buswidth - defines bus width of the DMA slave
  * device, source or target buses
  */
 enum dma_slave_buswidth {
 	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
 	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
 	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
+	DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
 	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
 	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
 };
@@ -305,9 +306,8 @@ enum dma_slave_buswidth {
 /**
  * struct dma_slave_config - dma slave channel runtime config
  * @direction: whether the data shall go in or out on this slave
- * channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are
- * legal values, DMA_BIDIRECTIONAL is not acceptable since we
- * need to differentiate source and target addresses.
+ * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
+ * legal values.
  * @src_addr: this is the physical address where DMA slave data
  * should be read (RX), if the source is memory this argument is
  * ignored.
@@ -341,15 +341,11 @@ enum dma_slave_buswidth {
  * and this struct will then be passed in as an argument to the
  * DMA engine device_control() function.
  *
- * The rationale for adding configuration information to this struct
- * is as follows: if it is likely that most DMA slave controllers in
- * the world will support the configuration option, then make it
- * generic. If not: if it is fixed so that it be sent in static from
- * the platform data, then prefer to do that. Else, if it is neither
- * fixed at runtime, nor generic enough (such as bus mastership on
- * some CPU family and whatnot) then create a custom slave config
- * struct and pass that, then make this config a member of that
- * struct, if applicable.
+ * The rationale for adding configuration information to this struct is as
+ * follows: if it is likely that more than one DMA slave controllers in
+ * the world will support the configuration option, then make it generic.
+ * If not: if it is fixed so that it be sent in static from the platform
+ * data, then prefer to do that.
  */
 struct dma_slave_config {
 	enum dma_transfer_direction direction;
@@ -363,6 +359,32 @@ struct dma_slave_config {
 	unsigned int slave_id;
 };
 
+/**
+ * enum dma_residue_granularity - Granularity of the reported transfer residue
+ * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: Residue reporting is not support. The
+ *  DMA channel is only able to tell whether a descriptor has been completed or
+ *  not, which means residue reporting is not supported by this channel. The
+ *  residue field of the dma_tx_state field will always be 0.
+ * @DMA_RESIDUE_GRANULARITY_SEGMENT: Residue is updated after each successfully
+ *  completed segment of the transfer (For cyclic transfers this is after each
+ *  period). This is typically implemented by having the hardware generate an
+ *  interrupt after each transferred segment and then the drivers updates the
+ *  outstanding residue by the size of the segment. Another possibility is if
+ *  the hardware supports scatter-gather and the segment descriptor has a field
+ *  which gets set after the segment has been completed. The driver then counts
+ *  the number of segments without the flag set to compute the residue.
+ * @DMA_RESIDUE_GRANULARITY_BURST: Residue is updated after each transferred
+ *  burst. This is typically only supported if the hardware has a progress
+ *  register of some sort (E.g. a register with the current read/write address
+ *  or a register with the amount of bursts/beats/bytes that have been
+ *  transferred or still need to be transferred).
+ */
+enum dma_residue_granularity {
+	DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
+	DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
+	DMA_RESIDUE_GRANULARITY_BURST = 2,
+};
+
 /* struct dma_slave_caps - expose capabilities of a slave channel only
  *
  * @src_addr_widths: bit mask of src addr widths the channel supports
@@ -373,6 +395,7 @@ struct dma_slave_config {
  *	should be checked by controller as well
  * @cmd_pause: true, if pause and thereby resume is supported
  * @cmd_terminate: true, if terminate cmd is supported
+ * @residue_granularity: granularity of the reported transfer residue
  */
 struct dma_slave_caps {
 	u32 src_addr_widths;
@@ -380,6 +403,7 @@ struct dma_slave_config {
 	u32 dst_addr_widths;
 	u32 directions;
 	bool cmd_pause;
 	bool cmd_terminate;
+	enum dma_residue_granularity residue_granularity;
 };
 
 static inline const char *dma_chan_name(struct dma_chan *chan)
@@ -405,6 +429,7 @@ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
 
 struct dmaengine_unmap_data {
+	u8 map_cnt;
 	u8 to_cnt;
 	u8 from_cnt;
 	u8 bidi_cnt;
@@ -645,7 +670,7 @@ struct dma_device {
 	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
 		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 		size_t period_len, enum dma_transfer_direction direction,
-		unsigned long flags, void *context);
+		unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
 		struct dma_chan *chan, struct dma_interleaved_template *xt,
 		unsigned long flags);
@@ -720,7 +745,7 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
 		unsigned long flags)
 {
 	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
-						period_len, dir, flags, NULL);
+						period_len, dir, flags);
 }
 
 static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
@@ -875,18 +900,6 @@ static inline void dmaengine_put(void)
 {
 }
 #endif
-#ifdef CONFIG_NET_DMA
-#define net_dmaengine_get()	dmaengine_get()
-#define net_dmaengine_put()	dmaengine_put()
-#else
-static inline void net_dmaengine_get(void)
-{
-}
-static inline void net_dmaengine_put(void)
-{
-}
-#endif
-
 #ifdef CONFIG_ASYNC_TX_DMA
 #define async_dmaengine_get()	dmaengine_get()
 #define async_dmaengine_put()	dmaengine_put()
@@ -908,16 +921,8 @@ async_dma_find_channel(enum dma_transaction_type type)
 	return NULL;
 }
 #endif /* CONFIG_ASYNC_TX_DMA */
-
-dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
-	void *dest, void *src, size_t len);
-dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
-	struct page *page, unsigned int offset, void *kdata, size_t len);
-dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
-	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
-	unsigned int src_off, size_t len);
 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
-	struct dma_chan *chan);
+				  struct dma_chan *chan);
 
 static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
 {
@@ -1040,6 +1045,8 @@ enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 				dma_filter_fn fn, void *fn_param);
+struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
+						  const char *name);
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
 void dma_release_channel(struct dma_chan *chan);
 #else
@@ -1063,6 +1070,11 @@ static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 {
 	return NULL;
 }
+static inline struct dma_chan *dma_request_slave_channel_reason(
+	struct device *dev, const char *name)
+{
+	return ERR_PTR(-ENODEV);
+}
 static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
 							 const char *name)
 {
@@ -1079,6 +1091,7 @@ int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
 struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
 #define dma_request_slave_channel_compat(mask, x, y, dev, name) \
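Two of the API additions in this diff are worth a short illustration. First, dma_request_slave_channel_reason(): unlike dma_request_slave_channel(), which returns NULL on any failure, the _reason variant returns an ERR_PTR() (hence the new <linux/err.h> include), so a caller can tell a missing provider (-EPROBE_DEFER) from a hard error. A minimal probe-path sketch; the foo_ helper name and the "rx" channel name are invented for illustration:

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical helper: request the (made-up) "rx" slave channel and
 * propagate the reason for failure to the caller.
 */
static int foo_request_rx_chan(struct device *dev, struct dma_chan **chan)
{
	*chan = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(*chan)) {
		int ret = PTR_ERR(*chan);

		*chan = NULL;
		/* -EPROBE_DEFER usually means the DMA controller has not
		 * probed yet; let the driver core retry instead of failing.
		 */
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get \"rx\" channel: %d\n", ret);
		return ret;
	}

	return 0;
}

Note that the !CONFIG_DMA_ENGINE stub added in this diff returns ERR_PTR(-ENODEV), so the IS_ERR() check above works unchanged when the DMA engine core is compiled out.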

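Second, the new residue_granularity capability tells a client how far to trust the residue reported through dmaengine_tx_status(). A sketch, assuming a channel already requested elsewhere and using dma_get_slave_caps() from this header to read the capabilities; the foo_ helper name is again invented:

#include <linux/dmaengine.h>

/* Hypothetical helper: true if the channel reports residue at segment
 * granularity or better. With DMA_RESIDUE_GRANULARITY_DESCRIPTOR the
 * residue field of struct dma_tx_state is documented to always be 0,
 * so position queries would be meaningless.
 */
static bool foo_chan_reports_residue(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps) < 0)
		return false;

	return caps.residue_granularity != DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
}

An audio client, for example, could use a check like this to fall back to per-period position reporting when the channel only signals completed descriptors.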