Diffstat (limited to 'drivers/usb/chipidea/udc.c')
-rw-r--r--  drivers/usb/chipidea/udc.c   178
1 file changed, 172 insertions(+), 6 deletions(-)
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 69ef3cd8d4f8..8a9b31fd5c89 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
+#include <linux/dma-direct.h>
#include <linux/err.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
@@ -540,6 +541,126 @@ static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
return ret;
}
+/*
+ * Verify the scatterlist by iterating over each sg entry.
+ * Return the index of the first invalid entry, which is less than
+ * num_sgs; if every entry is valid, num_sgs is returned.
+ */
+static int sglist_get_invalid_entry(struct device *dma_dev, u8 dir,
+ struct usb_request *req)
+{
+ int i;
+ struct scatterlist *s = req->sg;
+
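+ /* Nothing to verify for a single-entry sg list */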
+ if (req->num_sgs == 1)
+ return 1;
+
+ dir = dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ for (i = 0; i < req->num_sgs; i++, s = sg_next(s)) {
+ /* Only a small sg (generally the last one) may need to be
+ * bounced. If that happens, we can't ensure the address is
+ * page-aligned after the DMA map.
+ */
+ if (dma_kmalloc_needs_bounce(dma_dev, s->length, dir))
+ break;
+
+ /* Make sure each sg's start address (except for the first sg)
+ * is page-aligned, and that each sg's end address (except for
+ * the last sg) is page-aligned as well.
+ */
+ if (i == 0) {
+ if (!IS_ALIGNED(s->offset + s->length,
+ CI_HDRC_PAGE_SIZE))
+ break;
+ } else {
+ if (s->offset)
+ break;
+ if (!sg_is_last(s) && !IS_ALIGNED(s->length,
+ CI_HDRC_PAGE_SIZE))
+ break;
+ }
+ }
+
+ return i;
+}
+
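+/*
+ * Bounce the tail of the scatterlist, starting at entry @index, into a
+ * single page-rounded kmalloc buffer. The original sgl and nents are
+ * stashed in hwreq->sgt so sglist_do_debounce() can restore them later.
+ */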
+static int sglist_do_bounce(struct ci_hw_req *hwreq, int index,
+ bool copy, unsigned int *bounced)
+{
+ void *buf;
+ int i, ret, nents, num_sgs;
+ unsigned int rest, rounded;
+ struct scatterlist *sg, *src, *dst;
+
+ nents = index + 1;
+ ret = sg_alloc_table(&hwreq->sgt, nents, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ sg = src = hwreq->req.sg;
+ num_sgs = hwreq->req.num_sgs;
+ rest = hwreq->req.length;
+ dst = hwreq->sgt.sgl;
+
+ for (i = 0; i < index; i++) {
+ memcpy(dst, src, sizeof(*src));
+ rest -= src->length;
+ src = sg_next(src);
+ dst = sg_next(dst);
+ }
+
+ /* create one bounce buffer */
+ rounded = round_up(rest, CI_HDRC_PAGE_SIZE);
+ buf = kmalloc(rounded, GFP_KERNEL);
+ if (!buf) {
+ sg_free_table(&hwreq->sgt);
+ return -ENOMEM;
+ }
+
+ sg_set_buf(dst, buf, rounded);
+
+ hwreq->req.sg = hwreq->sgt.sgl;
+ hwreq->req.num_sgs = nents;
+ hwreq->sgt.sgl = sg;
+ hwreq->sgt.nents = num_sgs;
+
+ if (copy)
+ sg_copy_to_buffer(src, num_sgs - index, buf, rest);
+
+ *bounced = rest;
+
+ return 0;
+}
+
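+/*
+ * Undo sglist_do_bounce(): optionally copy the bounced data back into
+ * the original scatterlist, restore the original sg/num_sgs in the
+ * request, then free the bounce buffer and the shadow sg table.
+ */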
+static void sglist_do_debounce(struct ci_hw_req *hwreq, bool copy)
+{
+ void *buf;
+ int i, nents, num_sgs;
+ struct scatterlist *sg, *src, *dst;
+
+ sg = hwreq->req.sg;
+ num_sgs = hwreq->req.num_sgs;
+ src = sg_last(sg, num_sgs);
+ buf = sg_virt(src);
+
+ if (copy) {
+ dst = hwreq->sgt.sgl;
+ for (i = 0; i < num_sgs - 1; i++)
+ dst = sg_next(dst);
+
+ nents = hwreq->sgt.nents - num_sgs + 1;
+ sg_copy_from_buffer(dst, nents, buf, sg_dma_len(src));
+ }
+
+ hwreq->req.sg = hwreq->sgt.sgl;
+ hwreq->req.num_sgs = hwreq->sgt.nents;
+ hwreq->sgt.sgl = sg;
+ hwreq->sgt.nents = num_sgs;
+
+ kfree(buf);
+ sg_free_table(&hwreq->sgt);
+}
+
/**
* _hardware_enqueue: configures a request at hardware level
* @hwep: endpoint
@@ -552,6 +673,8 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
struct ci_hdrc *ci = hwep->ci;
int ret = 0;
struct td_node *firstnode, *lastnode;
+ unsigned int bounced_size;
+ struct scatterlist *sg;
/* don't queue twice */
if (hwreq->req.status == -EALREADY)
@@ -559,11 +682,29 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
hwreq->req.status = -EALREADY;
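+ /* If the controller has a short packet limit, bounce any sg
+ * entries that sglist_get_invalid_entry() flags as unusable.
+ */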
+ if (hwreq->req.num_sgs && hwreq->req.length &&
+ ci->has_short_pkt_limit) {
+ ret = sglist_get_invalid_entry(ci->dev->parent, hwep->dir,
+ &hwreq->req);
+ if (ret < hwreq->req.num_sgs) {
+ ret = sglist_do_bounce(hwreq, ret, hwep->dir == TX,
+ &bounced_size);
+ if (ret)
+ return ret;
+ }
+ }
+
ret = usb_gadget_map_request_by_dev(ci->dev->parent,
&hwreq->req, hwep->dir);
if (ret)
return ret;
+ if (hwreq->sgt.sgl) {
+ /* We've mapped a bigger buffer, now recover the actual size */
+ sg = sg_last(hwreq->req.sg, hwreq->req.num_sgs);
+ sg_dma_len(sg) = min(sg_dma_len(sg), bounced_size);
+ }
+
if (hwreq->req.num_mapped_sgs)
ret = prepare_td_for_sg(hwep, hwreq);
else
@@ -612,10 +753,17 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
do {
hw_write(ci, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
tmp_stat = hw_read(ci, OP_ENDPTSTAT, BIT(n));
- } while (!hw_read(ci, OP_USBCMD, USBCMD_ATDTW));
+ } while (!hw_read(ci, OP_USBCMD, USBCMD_ATDTW) && tmp_stat);
hw_write(ci, OP_USBCMD, USBCMD_ATDTW, 0);
if (tmp_stat)
goto done;
+
+ /* OP_ENDPTSTAT will be cleared by HW when the endpoint hits an
+ * error. Don't push this dTD to the dQH if the current dTD
+ * pointer is not the last one of the previous request.
+ */
+ if (hwep->qh.ptr->curr != cpu_to_le32(prevlastnode->dma))
+ goto done;
}
/* QH configuration */
@@ -676,6 +824,7 @@ static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
unsigned remaining_length;
unsigned actual = hwreq->req.length;
struct ci_hdrc *ci = hwep->ci;
+ bool is_isoc = hwep->type == USB_ENDPOINT_XFER_ISOC;
if (hwreq->req.status != -EALREADY)
return -EINVAL;
@@ -689,7 +838,7 @@ static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
int n = hw_ep_bit(hwep->num, hwep->dir);
if (ci->rev == CI_REVISION_24 ||
- ci->rev == CI_REVISION_22)
+ ci->rev == CI_REVISION_22 || is_isoc)
if (!hw_read(ci, OP_ENDPTSTAT, BIT(n)))
reprime_dtd(ci, hwep, node);
hwreq->req.status = -EALREADY;
@@ -708,11 +857,15 @@ static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
hwreq->req.status = -EPROTO;
break;
} else if ((TD_STATUS_TR_ERR & hwreq->req.status)) {
- hwreq->req.status = -EILSEQ;
- break;
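+ /* On isoc endpoints a transaction error is not fatal;
+ * don't fail the request with -EILSEQ.
+ */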
+ if (is_isoc) {
+ hwreq->req.status = 0;
+ } else {
+ hwreq->req.status = -EILSEQ;
+ break;
+ }
}
- if (remaining_length) {
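+ /* Don't treat a residual length as an error on isoc endpoints */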
+ if (remaining_length && !is_isoc) {
if (hwep->dir == TX) {
hwreq->req.status = -EPROTO;
break;
@@ -733,6 +886,10 @@ static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
usb_gadget_unmap_request_by_dev(hwep->ci->dev->parent,
&hwreq->req, hwep->dir);
+ /* sglist bounced */
+ if (hwreq->sgt.sgl)
+ sglist_do_debounce(hwreq, hwep->dir == RX);
+
hwreq->req.actual += actual;
if (hwreq->req.status)
@@ -960,6 +1117,12 @@ static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
return -EMSGSIZE;
}
+ if (ci->has_short_pkt_limit &&
+ hwreq->req.length > CI_MAX_REQ_SIZE) {
+ dev_err(hwep->ci->dev, "request length too big (max 16KB)\n");
+ return -EMSGSIZE;
+ }
+
/* first nuke then test link, e.g. previous status has not sent */
if (!list_empty(&hwreq->queue)) {
dev_err(hwep->ci->dev, "request already in queue\n");
@@ -1574,6 +1737,9 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
+ if (hwreq->sgt.sgl)
+ sglist_do_debounce(hwreq, false);
+
req->status = -ECONNRESET;
if (hwreq->req.complete != NULL) {
@@ -2063,7 +2229,7 @@ static irqreturn_t udc_irq(struct ci_hdrc *ci)
}
}
- if (USBi_UI & intr)
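+ /* Also run the transfer completion handler on USB error interrupts */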
+ if ((USBi_UI | USBi_UEI) & intr)
isr_tr_complete_handler(ci);
if ((USBi_SLI & intr) && !(ci->suspended)) {