diff options
 Documentation/devicetree/bindings/pci/msm_pcie.txt |  4
 drivers/gpu/drm/msm/sde/sde_kms.c                  | 26
 drivers/gpu/msm/kgsl_pool.c                        | 22
 drivers/media/platform/msm/ais/isp/msm_isp.h       |  1
 drivers/media/platform/msm/ais/isp/msm_isp47.c     |  4
 drivers/pci/host/pci-msm.c                         | 28
 net/rmnet_data/rmnet_data_handlers.c               | 10
 7 files changed, 60 insertions(+), 35 deletions(-)
diff --git a/Documentation/devicetree/bindings/pci/msm_pcie.txt b/Documentation/devicetree/bindings/pci/msm_pcie.txt index fc019bda50a7..bf3ad8a71c26 100644 --- a/Documentation/devicetree/bindings/pci/msm_pcie.txt +++ b/Documentation/devicetree/bindings/pci/msm_pcie.txt @@ -97,6 +97,9 @@ Optional Properties: and assign for each endpoint. - qcom,ep-latency: The time (unit: ms) to wait for the PCIe endpoint to become stable after power on, before de-assert the PERST to the endpoint. + - qcom,switch-latency: The time (unit: ms) to wait for the PCIe endpoint's link + training with switch downstream port after the link between switch upstream + port and RC is up. - qcom,wr-halt-size: With base 2, this exponent determines the size of the data that PCIe core will halt on for each write transaction. - qcom,cpl-timeout: Completion timeout value. This value specifies the time range @@ -276,6 +279,7 @@ Example: qcom,smmu-exist; qcom,smmu-sid-base = <0x1480>; qcom,ep-latency = <100>; + qcom,switch-latency = <100>; qcom,wr-halt-size = <0xa>; /* 1KB */ qcom,cpl-timeout = <0x2>; diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index 705ec2d0dfa2..a84d65195363 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -1260,12 +1260,6 @@ static int sde_kms_hw_init(struct msm_kms *kms) goto power_error; } - rc = sde_splash_parse_dt(dev); - if (rc) { - SDE_ERROR("parse dt for splash info failed: %d\n", rc); - goto power_error; - } - /* * Read the DISP_INTF_SEL register to check * whether early display is enabled in LK. @@ -1277,15 +1271,23 @@ static int sde_kms_hw_init(struct msm_kms *kms) } /* - * when LK has enabled early display, sde_splash_init should be - * called first. This function will first do bandwidth voting job - * because display hardware is accessing AHB data bus, otherwise - * device reboot will happen. Second is to check if the memory is - * reserved. 
+ * when LK has enabled early display, sde_splash_parse_dt and + * sde_splash_init must be called. The first function is to parse the + * mandatory memory node for splash function, and the second function + * will first do bandwidth voting job, because display hardware is now + * accessing AHB data bus, otherwise device reboot will happen, and then + * to check if the memory is reserved. */ sinfo = &sde_kms->splash_info; - if (sinfo->handoff) + if (sinfo->handoff) { + rc = sde_splash_parse_dt(dev); + if (rc) { + SDE_ERROR("parse dt for splash info failed: %d\n", rc); + goto power_error; + } + sde_splash_init(&priv->phandle, kms); + } for (i = 0; i < sde_kms->catalog->vbif_count; i++) { u32 vbif_idx = sde_kms->catalog->vbif[i].id; diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c index c31a85b07447..685ce3ea968b 100644 --- a/drivers/gpu/msm/kgsl_pool.c +++ b/drivers/gpu/msm/kgsl_pool.c @@ -65,26 +65,19 @@ _kgsl_get_pool_from_order(unsigned int order) /* Map the page into kernel and zero it out */ static void -_kgsl_pool_zero_page(struct page *p, unsigned int pool_order) +_kgsl_pool_zero_page(struct page *p) { - int i; - - for (i = 0; i < (1 << pool_order); i++) { - struct page *page = nth_page(p, i); - void *addr = kmap_atomic(page); + void *addr = kmap_atomic(p); - memset(addr, 0, PAGE_SIZE); - dmac_flush_range(addr, addr + PAGE_SIZE); - kunmap_atomic(addr); - } + memset(addr, 0, PAGE_SIZE); + dmac_flush_range(addr, addr + PAGE_SIZE); + kunmap_atomic(addr); } /* Add a page to specified pool */ static void _kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p) { - _kgsl_pool_zero_page(p, pool->pool_order); - spin_lock(&pool->list_lock); list_add_tail(&p->lru, &pool->page_list); pool->page_count++; @@ -329,7 +322,6 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages, } else return -ENOMEM; } - _kgsl_pool_zero_page(page, order); goto done; } @@ -349,7 +341,6 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages, 
page = alloc_pages(gfp_mask, order); if (page == NULL) return -ENOMEM; - _kgsl_pool_zero_page(page, order); goto done; } } @@ -379,13 +370,12 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages, } else return -ENOMEM; } - - _kgsl_pool_zero_page(page, order); } done: for (j = 0; j < (*page_size >> PAGE_SHIFT); j++) { p = nth_page(page, j); + _kgsl_pool_zero_page(p); pages[pcount] = p; pcount++; } diff --git a/drivers/media/platform/msm/ais/isp/msm_isp.h b/drivers/media/platform/msm/ais/isp/msm_isp.h index 72a76d178aa8..86974eeb4a32 100644 --- a/drivers/media/platform/msm/ais/isp/msm_isp.h +++ b/drivers/media/platform/msm/ais/isp/msm_isp.h @@ -355,6 +355,7 @@ struct msm_vfe_hardware_info { uint32_t dmi_reg_offset; uint32_t min_ab; uint32_t min_ib; + uint32_t regulator_num; const char *regulator_names[]; }; diff --git a/drivers/media/platform/msm/ais/isp/msm_isp47.c b/drivers/media/platform/msm/ais/isp/msm_isp47.c index d63282f80aca..d33dc758aef9 100644 --- a/drivers/media/platform/msm/ais/isp/msm_isp47.c +++ b/drivers/media/platform/msm/ais/isp/msm_isp47.c @@ -2537,8 +2537,7 @@ int msm_vfe47_get_regulators(struct vfe_device *vfe_dev) int rc = 0; int i; - vfe_dev->vfe_num_regulators = - sizeof(*vfe_dev->hw_info->regulator_names) / sizeof(char *); + vfe_dev->vfe_num_regulators = vfe_dev->hw_info->regulator_num; vfe_dev->regulator_info = kzalloc(sizeof(struct msm_cam_regulator) * vfe_dev->vfe_num_regulators, GFP_KERNEL); @@ -2811,6 +2810,7 @@ struct msm_vfe_hardware_info vfe47_hw_info = { .dmi_reg_offset = 0xC2C, .axi_hw_info = &msm_vfe47_axi_hw_info, .stats_hw_info = &msm_vfe47_stats_hw_info, + .regulator_num = 3, .regulator_names = {"vdd", "camss-vdd", "mmagic-vdd"}, }; EXPORT_SYMBOL(vfe47_hw_info); diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c index 217c7ce3f57b..84bc96d5bf64 100644 --- a/drivers/pci/host/pci-msm.c +++ b/drivers/pci/host/pci-msm.c @@ -278,6 +278,7 @@ #define PERST_PROPAGATION_DELAY_US_MIN 1000 #define 
PERST_PROPAGATION_DELAY_US_MAX 1005 +#define SWITCH_DELAY_MAX 20 #define REFCLK_STABILIZATION_DELAY_US_MIN 1000 #define REFCLK_STABILIZATION_DELAY_US_MAX 1005 #define LINK_UP_TIMEOUT_US_MIN 5000 @@ -626,6 +627,7 @@ struct msm_pcie_dev_t { bool ext_ref_clk; bool common_phy; uint32_t ep_latency; + uint32_t switch_latency; uint32_t wr_halt_size; uint32_t cpl_timeout; uint32_t current_bdf; @@ -1984,6 +1986,8 @@ static void msm_pcie_show_status(struct msm_pcie_dev_t *dev) dev->common_phy); PCIE_DBG_FS(dev, "ep_latency: %dms\n", dev->ep_latency); + PCIE_DBG_FS(dev, "switch_latency: %dms\n", + dev->switch_latency); PCIE_DBG_FS(dev, "wr_halt_size: 0x%x\n", dev->wr_halt_size); PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n", @@ -4675,7 +4679,15 @@ int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options) goto link_fail; } - msleep(500); + if (dev->switch_latency) { + PCIE_DBG(dev, "switch_latency: %dms\n", + dev->switch_latency); + if (dev->switch_latency <= SWITCH_DELAY_MAX) + usleep_range(dev->switch_latency * 1000, + dev->switch_latency * 1000); + else + msleep(dev->switch_latency); + } msm_pcie_config_controller(dev); @@ -6279,6 +6291,20 @@ static int msm_pcie_probe(struct platform_device *pdev) PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: ep-latency: 0x%x.\n", rc_idx, msm_pcie_dev[rc_idx].ep_latency); + msm_pcie_dev[rc_idx].switch_latency = 0; + ret = of_property_read_u32((&pdev->dev)->of_node, + "qcom,switch-latency", + &msm_pcie_dev[rc_idx].switch_latency); + + if (ret) + PCIE_DBG(&msm_pcie_dev[rc_idx], + "RC%d: switch-latency does not exist.\n", + rc_idx); + else + PCIE_DBG(&msm_pcie_dev[rc_idx], + "RC%d: switch-latency: 0x%x.\n", + rc_idx, msm_pcie_dev[rc_idx].switch_latency); + msm_pcie_dev[rc_idx].wr_halt_size = 0; ret = of_property_read_u32(pdev->dev.of_node, "qcom,wr-halt-size", diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c index ae60f35b363d..b17556c346ce 100644 --- a/net/rmnet_data/rmnet_data_handlers.c +++ 
b/net/rmnet_data/rmnet_data_handlers.c @@ -476,10 +476,12 @@ static rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb, if (likely((ckresult == RMNET_MAP_CHECKSUM_OK) || (ckresult == RMNET_MAP_CHECKSUM_SKIPPED))) skb->ip_summed |= CHECKSUM_UNNECESSARY; - else if (ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION - && ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT - && ckresult != RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET - && ckresult != RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET) { + else if (ckresult != + RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION && + ckresult != RMNET_MAP_CHECKSUM_VALIDATION_FAILED && + ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT && + ckresult != RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET && + ckresult != RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET) { rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_INGRESS_BAD_MAP_CKSUM); return RX_HANDLER_CONSUMED; |
