 
From: kernel test robot <lkp@intel.com>
Subject: Re: [PATCH v5 03/10] dmaengine: Actions: Add support for S700 DMA engine
    Hi Amit,

    Thank you for the patch! Perhaps something to improve:

    [auto build test WARNING on robh/for-next]
    [also build test WARNING on clk/clk-next pza/reset/next linus/master v5.8-rc3 next-20200702]
    [If your patch is applied to the wrong git tree, kindly drop us a note.
    And when submitting a patch, we suggest using '--base' as documented in
    https://git-scm.com/docs/git-format-patch]

    url: https://github.com/0day-ci/linux/commits/Amit-Singh-Tomar/Add-MMC-and-DMA-support-for-Actions-S700/20200702-225741
    base: https://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git for-next
    config: arm64-randconfig-r002-20200701 (attached as .config)
    compiler: clang version 11.0.0 (https://github.com/llvm/llvm-project 003a086ffc0d1affbb8300b36225fb8150a2d40a)
    reproduce (this is a W=1 build):
    wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
    chmod +x ~/bin/make.cross
    # install arm64 cross compiling tool for clang build
    # apt-get install binutils-aarch64-linux-gnu
    # save the attached .config to linux build tree
    COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=arm64

    If you fix the issue, kindly add the following tag as appropriate:
    Reported-by: kernel test robot <lkp@intel.com>

    All warnings (new ones prefixed by >>):

    >> drivers/dma/owl-dma.c:1117:14: warning: cast to smaller integer type 'enum owl_dma_id' from 'const void *' [-Wvoid-pointer-to-enum-cast]
       od->devid = (enum owl_dma_id)of_device_get_match_data(&pdev->dev);
                   ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    1 warning generated.
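
    Clang emits this in a W=1 build because of_device_get_match_data() returns
    const void *, and a direct cast from a pointer to an enum can truncate: the
    enum's underlying type is typically int (32 bits), while arm64 pointers are
    64 bits. A self-contained illustration of the same pattern (hypothetical
    names, not taken from the driver):

        /* On an LP64 target, enum chip_id occupies 32 bits but the pointer
         * is 64, so clang flags the narrowing cast with
         * -Wvoid-pointer-to-enum-cast.
         */
        enum chip_id { CHIP_A, CHIP_B };

        enum chip_id chip_from_match_data(const void *data)
        {
                return (enum chip_id)data;      /* warning fires here */
        }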

    vim +1117 drivers/dma/owl-dma.c

      1087
      1088  static int owl_dma_probe(struct platform_device *pdev)
      1089  {
      1090          struct device_node *np = pdev->dev.of_node;
      1091          struct owl_dma *od;
      1092          int ret, i, nr_channels, nr_requests;
      1093
      1094          od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
      1095          if (!od)
      1096                  return -ENOMEM;
      1097
      1098          od->base = devm_platform_ioremap_resource(pdev, 0);
      1099          if (IS_ERR(od->base))
      1100                  return PTR_ERR(od->base);
      1101
      1102          ret = of_property_read_u32(np, "dma-channels", &nr_channels);
      1103          if (ret) {
      1104                  dev_err(&pdev->dev, "can't get dma-channels\n");
      1105                  return ret;
      1106          }
      1107
      1108          ret = of_property_read_u32(np, "dma-requests", &nr_requests);
      1109          if (ret) {
      1110                  dev_err(&pdev->dev, "can't get dma-requests\n");
      1111                  return ret;
      1112          }
      1113
      1114          dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
      1115                   nr_channels, nr_requests);
      1116
    > 1117          od->devid = (enum owl_dma_id)of_device_get_match_data(&pdev->dev);
      1118
      1119          od->nr_pchans = nr_channels;
      1120          od->nr_vchans = nr_requests;
      1121
      1122          pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
      1123
      1124          platform_set_drvdata(pdev, od);
      1125          spin_lock_init(&od->lock);
      1126
      1127          dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
      1128          dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
      1129          dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);
      1130
      1131          od->dma.dev = &pdev->dev;
      1132          od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
      1133          od->dma.device_tx_status = owl_dma_tx_status;
      1134          od->dma.device_issue_pending = owl_dma_issue_pending;
      1135          od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
      1136          od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
      1137          od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
      1138          od->dma.device_config = owl_dma_config;
      1139          od->dma.device_pause = owl_dma_pause;
      1140          od->dma.device_resume = owl_dma_resume;
      1141          od->dma.device_terminate_all = owl_dma_terminate_all;
      1142          od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
      1143          od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
      1144          od->dma.directions = BIT(DMA_MEM_TO_MEM);
      1145          od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
      1146
      1147          INIT_LIST_HEAD(&od->dma.channels);
      1148
      1149          od->clk = devm_clk_get(&pdev->dev, NULL);
      1150          if (IS_ERR(od->clk)) {
      1151                  dev_err(&pdev->dev, "unable to get clock\n");
      1152                  return PTR_ERR(od->clk);
      1153          }
      1154
      1155          /*
      1156           * Even though the DMA controller is capable of generating 4
      1157           * IRQs for the DMA priority feature, we only use 1 IRQ for
      1158           * simplification.
      1159           */
      1160          od->irq = platform_get_irq(pdev, 0);
      1161          ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0,
      1162                                 dev_name(&pdev->dev), od);
      1163          if (ret) {
      1164                  dev_err(&pdev->dev, "unable to request IRQ\n");
      1165                  return ret;
      1166          }
      1167
      1168          /* Init physical channel */
      1169          od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans,
      1170                                    sizeof(struct owl_dma_pchan), GFP_KERNEL);
      1171          if (!od->pchans)
      1172                  return -ENOMEM;
      1173
      1174          for (i = 0; i < od->nr_pchans; i++) {
      1175                  struct owl_dma_pchan *pchan = &od->pchans[i];
      1176
      1177                  pchan->id = i;
      1178                  pchan->base = od->base + OWL_DMA_CHAN_BASE(i);
      1179          }
      1180
      1181          /* Init virtual channel */
      1182          od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans,
      1183                                    sizeof(struct owl_dma_vchan), GFP_KERNEL);
      1184          if (!od->vchans)
      1185                  return -ENOMEM;
      1186
      1187          for (i = 0; i < od->nr_vchans; i++) {
      1188                  struct owl_dma_vchan *vchan = &od->vchans[i];
      1189
      1190                  vchan->vc.desc_free = owl_dma_desc_free;
      1191                  vchan_init(&vchan->vc, &od->dma);
      1192          }
      1193
      1194          /* Create a pool of consistent memory blocks for hardware descriptors */
      1195          od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev,
      1196                                         sizeof(struct owl_dma_lli),
      1197                                         __alignof__(struct owl_dma_lli),
      1198                                         0);
      1199          if (!od->lli_pool) {
      1200                  dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n");
      1201                  return -ENOMEM;
      1202          }
      1203
      1204          clk_prepare_enable(od->clk);
      1205
      1206          ret = dma_async_device_register(&od->dma);
      1207          if (ret) {
      1208                  dev_err(&pdev->dev, "failed to register DMA engine device\n");
      1209                  goto err_pool_free;
      1210          }
      1211
      1212          /* Device-tree DMA controller registration */
      1213          ret = of_dma_controller_register(pdev->dev.of_node,
      1214                                           owl_dma_of_xlate, od);
      1215          if (ret) {
      1216                  dev_err(&pdev->dev, "of_dma_controller_register failed\n");
      1217                  goto err_dma_unregister;
      1218          }
      1219
      1220          return 0;
      1221
      1222  err_dma_unregister:
      1223          dma_async_device_unregister(&od->dma);
      1224  err_pool_free:
      1225          clk_disable_unprepare(od->clk);
      1226          dma_pool_destroy(od->lli_pool);
      1227
      1228          return ret;
      1229  }
      1230
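
    A common way to quiet -Wvoid-pointer-to-enum-cast is to round-trip the
    pointer through an integer type of pointer width (uintptr_t or unsigned
    long) before narrowing to the enum, since the match data here is just an
    enum value carried through the of_device_id .data pointer. A minimal
    sketch of that idiom for line 1117, offered as one possible fix rather
    than necessarily the one the maintainers will apply:

        /* Convert pointer -> same-width integer -> enum, so no
         * pointer-to-smaller-integer cast remains for clang to flag.
         */
        od->devid = (enum owl_dma_id)(uintptr_t)of_device_get_match_data(&pdev->dev);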

    ---
    0-DAY CI Kernel Test Service, Intel Corporation
    https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org