AM5728 IPC Mechanism Analysis

1. Implementation of the platform bus layer

The driver portion of the TI IPC mechanism under Linux is packaged as a platform_driver; its main implementation lives in drivers\remoteproc\omap_remoteproc.c, drivers\remoteproc\remoteproc_core.c and drivers\remoteproc\remoteproc_virtio.c.

1.1 DSP load commands

The commands used to load the DSP are as follows:

rm /lib/firmware/dra7-dsp1-fw.xe66
#ln -s /home/root/yeshen/SCARA_AM5728_DSP2.out /lib/firmware/dra7-dsp2-fw.xe66
ln -s /home/root/yeshen/SCARA_AM5728_IPC.out /lib/firmware/dra7-dsp1-fw.xe66

# Unbind the platform driver "omap-rproc" from the platform device "40800000.dsp"
echo 40800000.dsp > /sys/bus/platform/drivers/omap-rproc/unbind

# Bind the platform driver "omap-rproc" to the platform device "40800000.dsp"
echo 40800000.dsp > /sys/bus/platform/drivers/omap-rproc/bind

#echo 41000000.dsp > /sys/bus/platform/drivers/omap-rproc/unbind
#echo 41000000.dsp > /sys/bus/platform/drivers/omap-rproc/bind
/home/root/yeshen/app_host &
/home/root/yeshen/ftp &

The key point is the match between the platform driver "omap-rproc" and the platform device "40800000.dsp": the probe() function that runs when the driver binds is what reloads the DSP firmware.

The console output of the whole DSP load process looks like this:

root@am57xx-evm:~# echo 40800000.dsp > /sys/bus/platform/drivers/omap-rproc/bind
[   41.879134] omap-rproc 40800000.dsp: assigned reserved memory node dsp1_cma@99000000
[   41.886963]  remoteproc0: 40800000.dsp is available
[   41.893186]  remoteproc0: Note: remoteproc is still under development and considered experimental.
[   41.902262]  remoteproc0: THE BINARY FORMAT IS NOT YET FINALIZED, and backward compatibility isn't yet guaranteed.
root@am57xx-evm:~# [   41.919745]  remoteproc0: powering up 40800000.dsp
[   41.926103]  remoteproc0: Booting fw image dra7-dsp1-fw.xe66, size 5174356
[   41.940158] omap_hwmod: mmu0_dsp1: _wait_target_disable failed
[   41.946051] omap-iommu 40d01000.mmu: 40d01000.mmu: version 3.0
[   41.951989] omap-iommu 40d02000.mmu: 40d02000.mmu: version 3.0
[   41.966769]  remoteproc0: remote processor 40800000.dsp is now up
[   41.973236] virtio_rpmsg_bus virtio0: rpmsg host is online
[   41.978798]  remoteproc0: registered virtio0 (type 7)
[   41.994406] virtio_rpmsg_bus virtio0: creating channel rpmsg-proto addr 0x3d

1.2 platform device

The platform device "40800000.dsp" is defined in the dts:

dra7.dtsi:

		dsp1: dsp@40800000 {
			compatible = "ti,dra7-dsp";
			reg = <0x40800000 0x48000>,
			      <0x40e00000 0x8000>,
			      <0x40f00000 0x8000>;
			reg-names = "l2ram", "l1pram", "l1dram";
			ti,hwmods = "dsp1";
			syscon-bootreg = <&scm_conf 0x55c>;
			iommus = <&mmu0_dsp1>, <&mmu1_dsp1>;
			ti,rproc-standby-info = <0x4a005420>;
			status = "disabled";
		};
am57xx-beagle-x15-common.dtsi:

&dsp1 {
	status = "okay";
	memory-region = <&dsp1_cma_pool>;
	mboxes = <&mailbox5 &mbox_dsp1_ipc3x>;
	timers = <&timer5>;
};

The platform device "41000000.dsp" is defined in the dts:

dra74x.dtsi:

		dsp2: dsp@41000000 {
			compatible = "ti,dra7-dsp";
			reg = <0x41000000 0x48000>,
			      <0x41600000 0x8000>,
			      <0x41700000 0x8000>;
			reg-names = "l2ram", "l1pram", "l1dram";
			ti,hwmods = "dsp2";
			syscon-bootreg = <&scm_conf 0x560>;
			iommus = <&mmu0_dsp2>, <&mmu1_dsp2>;
			ti,rproc-standby-info = <0x4a005620>;
			status = "disabled";
		};
am57xx-beagle-x15-common.dtsi:

&dsp2 {
	status = "okay";
	memory-region = <&dsp2_cma_pool>;
	mboxes = <&mailbox6 &mbox_dsp2_ipc3x>;
	timers = <&timer6>;
};

When these two platform devices are created, the system also attaches a platform_data to each of them:

omap_generic_init() -> pdata_quirks_init() -> omap_auxdata_lookup[]:

static struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {

	OF_DEV_AUXDATA("ti,dra7-dsp", 0x40800000, "40800000.dsp",
		       &omap4_ipu_dsp_pdata),
	OF_DEV_AUXDATA("ti,dra7-dsp", 0x41000000, "41000000.dsp",
		       &omap4_ipu_dsp_pdata),

}

↓

static struct omap_rproc_pdata omap4_ipu_dsp_pdata = {
	.device_enable = omap_rproc_device_enable,
	.device_shutdown = omap_rproc_device_shutdown,
	.timer_ops = &omap_rproc_dmtimer_ops,
};
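
These OF_DEV_AUXDATA() entries take effect when of_platform_populate() creates the platform devices: each newly created device is matched against this lookup table by compatible string and unit address, and the listed platform_data is attached to it. For orientation, a sketch of the macro and its backing structure as found in include/linux/of_platform.h of this kernel generation:

struct of_dev_auxdata {
	char *compatible;		/* compatible string of the DT node     */
	resource_size_t phys_addr;	/* unit address, e.g. 0x40800000        */
	char *name;			/* forced device name, "40800000.dsp"   */
	void *platform_data;		/* attached pdata, &omap4_ipu_dsp_pdata */
};

#define OF_DEV_AUXDATA(_compat, _phys, _name, _pdata)	\
	{ .compatible = _compat, .phys_addr = _phys,	\
	  .name = _name, .platform_data = _pdata }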

omap_rproc_device_enable() and omap_rproc_device_shutdown() are the power-management hooks that power the DSP up and down.

1.3 platform driver

The platform driver "omap-rproc" is defined in drivers\remoteproc\omap_remoteproc.c:

static struct platform_driver omap_rproc_driver = {
	.probe = omap_rproc_probe,
	.remove = omap_rproc_remove,
	.driver = {
		.name = "omap-rproc",
		.pm = &omap_rproc_pm_ops,
		.of_match_table = of_match_ptr(omap_rproc_of_match),
	},
};

static const struct of_device_id omap_rproc_of_match[] = {

	{
		.compatible     = "ti,dra7-ipu",
		.data           = dra7_rproc_dev_data,
	},
	
};

static const struct omap_rproc_dev_data dra7_rproc_dev_data[] = {
	{
		.device_name	= "40800000.dsp",
		.fw_name	= "dra7-dsp1-fw.xe66",
	},
	{
		.device_name	= "41000000.dsp",
		.fw_name	= "dra7-dsp2-fw.xe66",
	},

};

1.3.1 omap_rproc_probe()

Once the device and driver have been matched, the core of the work is the probe() initialization function:

static int omap_rproc_probe(struct platform_device *pdev)
{
	struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
	struct device_node *np = pdev->dev.of_node;
	struct omap_rproc_timer_ops *timer_ops;
	struct omap_rproc *oproc;
	struct rproc *rproc;
	const char *firmware;
	u32 standby_addr = 0;
	int num_timers;
	int ret;

	if (!np) {
		dev_err(&pdev->dev, "only DT-based devices are supported\n");
		return -ENODEV;
	}

	/*
	 * self-manage the ordering dependencies between omap_device_enable/idle
	 * and omap_device_assert/deassert_hardreset API during runtime suspend
	 * and resume, rather than relying on the order in omap_device layer.
	 */
	if (pdev->dev.pm_domain) {
		dev_dbg(&pdev->dev, "device pm_domain is being reset for this remoteproc device\n");
		pdev->dev.pm_domain = NULL;
	}

    /* (0) pdata is the platform_data attached to the platform_device, i.e.:
        static struct omap_rproc_pdata omap4_ipu_dsp_pdata = {
        	.device_enable = omap_rproc_device_enable,
        	.device_shutdown = omap_rproc_device_shutdown,
        	.timer_ops = &omap_rproc_dmtimer_ops,
        };
     */
	if (!pdata || !pdata->device_enable || !pdata->device_shutdown) {
		dev_err(&pdev->dev, "platform data is either missing or incomplete\n");
		return -ENODEV;
	}

    /* (1) Get the firmware name for the current platform_device; "40800000.dsp" maps to "dra7-dsp1-fw.xe66" */
	firmware = omap_rproc_get_firmware(pdev);
	if (IS_ERR(firmware))
		return PTR_ERR(firmware);

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
		return ret;
	}

    /* (2) Allocate the rproc (oproc) structure; its important members are:
        rproc->firmware = "dra7-dsp1-fw.xe66";
    	rproc->name = "40800000.dsp";
    	rproc->ops = &omap_rproc_ops;
        rproc->fw_ops = &rproc_elf_fw_ops;
     */
	rproc = rproc_alloc(&pdev->dev, dev_name(&pdev->dev), &omap_rproc_ops,
			    firmware, sizeof(*oproc));
	if (!rproc)
		return -ENOMEM;

	oproc = rproc->priv;
	oproc->rproc = rproc;
	/* All existing OMAP IPU and DSP processors have an MMU */
    /* (3) All OMAP-family IPUs and DSPs are assumed to have an MMU */
	rproc->has_iommu = true;

    /* (4) Get the addresses of the DSP internal memories
        Definition in the dts:
			reg = <0x40800000 0x48000>,
			      <0x40e00000 0x8000>,
			      <0x40f00000 0x8000>;
			reg-names = "l2ram", "l1pram", "l1dram";
        Meaning:
            internal RAM memory region, "l2ram" for L2 RAM,
            "l1pram" for L1 Program RAM Memory/Cache,
            "l1dram" for L1 Data RAM Memory/Cache,
        Resulting values:
            // L2RAM:
            // virtual address of the DSP L2RAM as seen by the CPU:
            oproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
            // physical address of the DSP L2RAM as seen by the CPU:
            oproc->mem[0].bus_addr = 0x40800000;
            // The DSPs have the internal memories starting at a fixed offset of 0x800000 from address 0, and this corresponds to L2RAM.
            // address of the DSP L2RAM as seen by the DSP itself:
            oproc->mem[0].dev_addr = 0x00800000;
        Corresponding register:
            DSP1_L2_SRAM 0x4080_0000
     */
	ret = omap_rproc_of_get_internal_memories(pdev, rproc);
	if (ret)
		goto free_rproc;

    /* (5) Get the address of the DSP boot register
         Definition in the dts:
            syscon-bootreg = <&scm_conf 0x55c>;

            l4_cfg: l4@4a000000 {
    			compatible = "ti,dra7-l4-cfg", "simple-bus";
    			#address-cells = <1>;
    			#size-cells = <1>;
    			ranges = <0 0x4a000000 0x22c000>;
    
    			scm: scm@2000 {
    				compatible = "ti,dra7-scm-core", "simple-bus";
    				reg = <0x2000 0x2000>;
    				#address-cells = <1>;
    				#size-cells = <1>;
    				ranges = <0 0x2000 0x2000>;
    
    				scm_conf: scm_conf@0 {
    					compatible = "syscon", "simple-bus";
    					reg = <0x0 0x1400>;
         Meaning:
            syscon-bootreg:	
            Should be a pair of 
            the phandle to the System Control Configuration region that contains the boot address register, 
            and the register offset of the boot address register within the System Control module.
         Resulting values:
            oproc->boot_data->syscon = 0x4a002000
            oproc->boot_data->boot_reg = 0x55c;
            oproc->boot_data->boot_reg_shift = 10;
         Corresponding register:
            CTRL_CORE_CONTROL_DSP1_RST_VECT 0x4A00255C
     */
	ret = omap_rproc_get_boot_data(pdev, rproc);
	if (ret)
		goto free_rproc;

	timer_ops = pdata->timer_ops;
	/*
	 * Timer nodes are directly used in client nodes as phandles, so
	 * retrieve the count using NULL as cells-name.
	 * XXX: Use the much simpler of_property_count_elems_of_size
	 * if available
	 */
    /* (6) Get the number of timers:
        Definition in the dts:
            timers = <&timer5>;
        Meaning:
            timers:		
            One or more phandles to OMAP DMTimer nodes, that serve as System/Tick timers for the OS running on the remote processors. 
            This will usually be a single timer if the processor sub-system is running in SMP mode, or one per core in the processor sub-system.
			This property is mandatory on remote processors requiring external tick wakeup, and to support Power Management features. 
            The timers to be used should match with the timers used in the firmware image.
     */
	oproc->num_timers = of_count_phandle_with_args(np, "timers", NULL);
	if (oproc->num_timers <= 0) {
		dev_dbg(&pdev->dev, "device does not have timers, status = %d\n",
			oproc->num_timers);
		oproc->num_timers = 0;
	} else {
		if (!timer_ops || !timer_ops->request_timer ||
		    !timer_ops->release_timer || !timer_ops->start_timer ||
		    !timer_ops->stop_timer) {
			dev_err(&pdev->dev, "device does not have required timer ops\n");
			ret = -ENODEV;
			goto free_rproc;
		}
	}

#ifdef CONFIG_OMAP_REMOTEPROC_WATCHDOG
	oproc->num_wd_timers = of_count_phandle_with_args(np, "watchdog-timers",
							  NULL);
	if (oproc->num_wd_timers <= 0) {
		dev_dbg(&pdev->dev, "device does not have watchdog timers, status = %d\n",
			oproc->num_wd_timers);
		oproc->num_wd_timers = 0;
	} else {
		if (!timer_ops || !timer_ops->get_timer_irq ||
		    !timer_ops->ack_timer_irq) {
			dev_err(&pdev->dev, "device does not have required watchdog timer ops\n");
			ret = -ENODEV;
			goto free_rproc;
		}
	}
#endif

    /* (6.1) Allocate the timer structures */
	if (oproc->num_timers || oproc->num_wd_timers) {
		num_timers = oproc->num_timers + oproc->num_wd_timers;
		oproc->timers = devm_kzalloc(&pdev->dev, sizeof(*oproc->timers)
					     * num_timers, GFP_KERNEL);
		if (!oproc->timers) {
			ret = -ENOMEM;
			goto free_rproc;
		}

		dev_dbg(&pdev->dev, "device has %d tick timers and %d watchdog timers\n",
			oproc->num_timers, oproc->num_wd_timers);
	}

	init_completion(&oproc->pm_comp);

    /* (7) Get the autosuspend delay:
         oproc->autosuspend_delay = 10000 ms
     */
	oproc->autosuspend_delay = omap_rproc_get_autosuspend_delay(pdev);
	if (oproc->autosuspend_delay < 0)
		goto free_rproc;

    /* (8) Get the address of the standby status register
        Definition in the dts:
            ti,rproc-standby-info = <0x4a005420>;
        Meaning:
            ti,rproc-standby-info: 
            Standby data for the remote processor. 
            This is mandatory to support Power Management for the OMAP remoteprocs, and should contain the address containing the module standby status.
        Resulting values:
            standby_addr = 0x4a005420
            oproc->standby_addr = the virtual address that 0x4a005420 is mapped to
        Corresponding register:
            CM_DSP1_DSP1_CLKCTRL 0x4A00 5420
     */
	ret = of_property_read_u32(np, "ti,rproc-standby-info", &standby_addr);
	if (ret || !standby_addr)
		goto free_rproc;

	oproc->standby_addr = devm_ioremap(&pdev->dev, standby_addr,
					   sizeof(u32));
	if (!oproc->standby_addr)
		goto free_rproc;

    /* (9) Assign the corresponding reserved memory
        Definition in the dts:
            memory-region = <&dsp1_cma_pool>;

    		dsp1_cma_pool: dsp1_cma@99000000 {
    			compatible = "shared-dma-pool";
    			reg = <0x0 0x99000000 0x0 0x4000000>;
    			reusable;
    			status = "okay";
    		};
        Meaning:
            memory-region:	
            phandle to the reserved memory node to be associated with the remoteproc device. 
            The reserved memory node should be a CMA memory node, and should be defined as per the bindings
        Resulting values:
            dev->cma_area = cma;   // hand the 64 MB of reserved memory starting at physical address 0x99000000 to dsp1
     */
	if (of_reserved_mem_device_init(&pdev->dev)) {
		dev_err(&pdev->dev, "device does not have specific CMA pool\n");
		goto free_rproc;
	}

    /* (10) pdev->dev->driver_data = rproc */
	platform_set_drvdata(pdev, rproc);

    /* (11) With all parameters prepared, enter the core initialization function */
	ret = rproc_add(rproc);
	if (ret)
		goto release_mem;

	if (rproc_get_alias_id(rproc) < 0)
		dev_warn(&pdev->dev, "device does not have an alias id\n");

	return 0;

release_mem:
	of_reserved_mem_device_release(&pdev->dev);
free_rproc:
	rproc_put(rproc);
	return ret;
}

1.3.2 rproc_add()

omap_rproc_probe() -> rproc_add():

int rproc_add(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret;

	ret = device_add(dev);
	if (ret < 0)
		return ret;

	dev_info(dev, "%s is available\n", rproc->name);

	dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n");
	dev_info(dev, "THE BINARY FORMAT IS NOT YET FINALIZED, and backward compatibility isn't yet guaranteed.\n");

	/* create debugfs entries */
    /* (11.1) */
	rproc_create_debug_dir(rproc);
    /* (11.2) */
	ret = rproc_add_virtio_devices(rproc);
	if (ret < 0)
		return ret;

	/* expose to rproc_get_by_phandle users */
	mutex_lock(&rproc_list_mutex);
	list_add(&rproc->node, &rproc_list);
	mutex_unlock(&rproc_list_mutex);

	return 0;
}

It first prints some initialization messages:

[   41.886963]  remoteproc0: 40800000.dsp is available
[   41.893186]  remoteproc0: Note: remoteproc is still under development and considered experimental.
[   41.902262]  remoteproc0: THE BINARY FORMAT IS NOT YET FINALIZED, and backward compatibility isn't yet guaranteed.

Then rproc_create_debug_dir() creates the corresponding debugfs interface:

root@am57xx-evm:~# ls /sys/kernel/debug/remoteproc/remoteproc0/
name      recovery  state     trace0    version
root@am57xx-evm:~# cat /sys/kernel/debug/remoteproc/remoteproc0/name   
40800000.dsp
root@am57xx-evm:~#
root@am57xx-evm:~# cat /sys/kernel/debug/remoteproc/remoteproc1/name 
4b234000.pru0
root@am57xx-evm:~# cat /sys/kernel/debug/remoteproc/remoteproc2/name 
4b238000.pru1
root@am57xx-evm:~# cat /sys/kernel/debug/remoteproc/remoteproc3/name 
4b2b4000.pru0
root@am57xx-evm:~# cat /sys/kernel/debug/remoteproc/remoteproc4/name 
4b2b8000.pru1

Finally it continues by calling rproc_add_virtio_devices() for further initialization.

1.3.3 rproc_add_virtio_devices()

omap_rproc_probe() -> rproc_add() -> rproc_add_virtio_devices():

static int rproc_add_virtio_devices(struct rproc *rproc)
{
	int ret;

	/* nothing to do if relying on external userspace loader */
	if (rproc->use_userspace_loader)
		return 0;

	/* rproc_del() calls must wait until async loader completes */
	init_completion(&rproc->firmware_loading_complete);

	/*
	 * We must retrieve early virtio configuration info from
	 * the firmware (e.g. whether to register a virtio device,
	 * what virtio features does it support, ...).
	 *
	 * We're initiating an asynchronous firmware loading, so we can
	 * be built-in kernel code, without hanging the boot process.
	 */
    /* (11.2.1) */
	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				      rproc->firmware, &rproc->dev, GFP_KERNEL,
				      rproc, rproc_fw_config_virtio);
	if (ret < 0) {
		dev_err(&rproc->dev, "request_firmware_nowait err: %d\n", ret);
		complete_all(&rproc->firmware_loading_complete);
	}

	return ret;
}

↓

request_firmware_nowait() → request_firmware_work_func()

static void request_firmware_work_func(struct work_struct *work)
{
	struct firmware_work *fw_work;
	const struct firmware *fw;

	fw_work = container_of(work, struct firmware_work, work);

    /* (11.2.1.1) Search the following paths for the specified firmware file dra7-dsp1-fw.xe66,
            static const char * const fw_path[] = {
            	fw_path_para,
            	"/lib/firmware/updates/" UTS_RELEASE,
            	"/lib/firmware/updates",
            	"/lib/firmware/" UTS_RELEASE,
            	"/lib/firmware"
            };
        and load the file contents into the fw structure in memory
     */
	_request_firmware(&fw, fw_work->name, fw_work->device,
			  fw_work->opt_flags);

    /* (11.2.1.2) Invoke the rproc_fw_config_virtio() callback specified earlier */
	fw_work->cont(fw, fw_work->context);
	put_device(fw_work->device); /* taken in request_firmware_nowait() */

	module_put(fw_work->module);
	kfree_const(fw_work->name);
	kfree(fw_work);
}

↓

rproc_fw_config_virtio() uses the firmware ops registered earlier (rproc->fw_ops = &rproc_elf_fw_ops) to operate on the DSP firmware file, parsing and processing its parts according to the ELF format:

static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
{
	struct rproc *rproc = context;
	struct resource_table *table;
	int ret, tablesz;

    /* (11.2.1.2.1) Check the integrity of the ELF file */
	if (rproc_fw_sanity_check(rproc, fw) < 0)
		goto out;

	/* look for the resource table */
    /* (11.2.1.2.2) Look for the resource table in the ELF file */
	table = rproc_find_rsc_table(rproc, fw,  &tablesz);
	if (!table)
		goto out;

	rproc->table_csum = crc32(0, table, tablesz);

	/*
	 * Create a copy of the resource table. When a virtio device starts
	 * and calls vring_new_virtqueue() the address of the allocated vring
	 * will be stored in the cached_table. Before the device is started,
	 * cached_table will be copied into devic memory.
	 */
    /* (11.2.1.2.3) Cache the resource table into rproc->cached_table */
	rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
	if (!rproc->cached_table)
		goto out;

	rproc->table_ptr = rproc->cached_table;

	/* count the number of notify-ids */
	rproc->max_notifyid = -1;
    /* (11.2.1.2.4) Walk the resource table and
        use rproc_count_vrings() to count the vrings of RSC_VDEV-type resources
     */
	ret = rproc_handle_resources(rproc, tablesz,
				     rproc_count_vrings_handler);
	if (ret)
		goto out;

	/* look for virtio devices and register them */
    /* (11.2.1.2.5) Walk the resource table and
        use rproc_handle_vdev() to register RSC_VDEV-type resources as virtio devices
     */
	ret = rproc_handle_resources(rproc, tablesz, rproc_vdev_handler);

out:
	release_firmware(fw);
	if (!rproc->use_userspace_loader) {
		/* allow rproc_del() contexts, if any, to proceed */
		complete_all(&rproc->firmware_loading_complete);
	}
}

↓

2. Implementation of the virtio bus layer

2.1 virtio device

Continuing from the platform driver's probe path: once the resource table has been read out of the firmware, it is walked and every RSC_VDEV-type resource (struct fw_rsc_vdev, the virtio device header) is registered as a virtio device.
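
For orientation, the layout of the resource table header and of an RSC_VDEV entry, as defined in include/linux/remoteproc.h of this kernel generation, looks like this:

struct resource_table {
	u32 ver;			/* resource table version */
	u32 num;			/* number of entries */
	u32 reserved[2];
	u32 offset[0];			/* offset of each entry from the start of the table */
} __packed;

struct fw_rsc_vdev_vring {
	u32 da;				/* device address of the vring */
	u32 align;			/* alignment of the vring descriptors */
	u32 num;			/* number of buffers in the vring */
	u32 notifyid;			/* kick/notify id of this vring */
	u32 reserved;
} __packed;

struct fw_rsc_vdev {
	u32 id;				/* virtio device id, e.g. VIRTIO_ID_RPMSG (7) */
	u32 notifyid;
	u32 dfeatures;			/* features advertised by the firmware */
	u32 gfeatures;			/* features finally negotiated by the host */
	u32 config_len;
	u8 status;
	u8 num_of_vrings;		/* number of fw_rsc_vdev_vring entries that follow */
	u8 reserved[2];
	struct fw_rsc_vdev_vring vring[0];
	u8 config[0];
} __packed;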

omap_rproc_probe() -> rproc_add() -> rproc_add_virtio_devices() -> rproc_fw_config_virtio() -> rproc_vdev_handler[] -> rproc_handle_vdev():

static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
			     int offset, int avail)
{
	struct device *dev = &rproc->dev;
	struct rproc_vdev *rvdev;
	int i, ret;

	/* make sure resource isn't truncated */
	if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring)
			+ rsc->config_len > avail) {
		dev_err(dev, "vdev rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved[0] || rsc->reserved[1]) {
		dev_err(dev, "vdev rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	dev_dbg(dev, "vdev rsc: id %d, dfeatures %x, cfg len %d, %d vrings\n",
		rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings);

	/* we currently support only two vrings per rvdev */
    /* (11.2.1.2.5.1) Each vdev resource may contain several vrings; currently no more than two are supported */
	if (rsc->num_of_vrings > ARRAY_SIZE(rvdev->vring)) {
		dev_err(dev, "too many vrings: %d\n", rsc->num_of_vrings);
		return -EINVAL;
	}

    /* (11.2.1.2.5.2) Allocate the rvdev structure */
	rvdev = kzalloc(sizeof(*rvdev), GFP_KERNEL);
	if (!rvdev)
		return -ENOMEM;

	rvdev->rproc = rproc;

	/* parse the vrings */
    /* (11.2.1.2.5.3) Parse the vring information in the vdev resource and store it in rvdev */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_parse_vring(rvdev, rsc, i);
		if (ret)
			goto free_rvdev;
	}

	/* remember the resource offset*/
    /* (11.2.1.2.5.4) Record the offset of the vdev resource within the resource table in rvdev */
	rvdev->rsc_offset = offset;

	list_add_tail(&rvdev->node, &rproc->rvdevs);

	/* it is now safe to add the virtio device */
    /* (11.2.1.2.5.5) Register the corresponding virtio device based on rvdev */
	ret = rproc_add_virtio_dev(rvdev, rsc->id);
	if (ret)
		goto remove_rvdev;

	return 0;

remove_rvdev:
	list_del(&rvdev->node);
free_rvdev:
	kfree(rvdev);
	return ret;
}

↓

int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rproc->dev;
	struct virtio_device *vdev = &rvdev->vdev;
	int ret;

    /* (11.2.1.2.5.5.1) Fill in some members of rvdev->vdev.
        The id value comes from the resource's rsc->id.
        This id is also important: the virtio driver later uses it to match against the virtio device.
        In the DSP firmware's resource table, the id of the RSC_VDEV-type resource is:
            #define VIRTIO_ID_RPMSG		7      // virtio remote processor messaging
     */
	vdev->id.device	= id,
    /* (11.2.1.2.5.5.2) Fill in further members of rvdev->vdev.
        The most important one is the config operations set, rproc_virtio_config_ops:
            static const struct virtio_config_ops rproc_virtio_config_ops = {
            	.get_features	= rproc_virtio_get_features,
            	.finalize_features = rproc_virtio_finalize_features,
            	.find_vqs	= rproc_virtio_find_vqs,
            	.del_vqs	= rproc_virtio_del_vqs,
            	.reset		= rproc_virtio_reset,
            	.set_status	= rproc_virtio_set_status,
            	.get_status	= rproc_virtio_get_status,
            	.get		= rproc_virtio_get,
            	.set		= rproc_virtio_set,
            };
        Many of the later operations on the virtio device go through these functions
     */
	vdev->config = &rproc_virtio_config_ops,
	vdev->dev.parent = dev;
	vdev->dev.release = rproc_vdev_release;

	/*
	 * We're indirectly making a non-temporary copy of the rproc pointer
	 * here, because drivers probed with this vdev will indirectly
	 * access the wrapping rproc.
	 *
	 * Therefore we must increment the rproc refcount here, and decrement
	 * it _only_ when the vdev is released.
	 */
	get_device(&rproc->dev);

    /* (11.2.1.2.5.5.3) Register the virtio device */
	ret = register_virtio_device(vdev);
	if (ret) {
		put_device(&rproc->dev);
		dev_err(dev, "failed to register vdev: %d\n", ret);
		goto out;
	}

	dev_info(dev, "registered %s (type %d)\n", dev_name(&vdev->dev), id);

out:
	return ret;
}

↓

int register_virtio_device(struct virtio_device *dev)
{
	int err;

	dev->dev.bus = &virtio_bus;

	/* Assign a unique device index and hence name. */
	err = ida_simple_get(&virtio_index_ida, 0, 0, GFP_KERNEL);
	if (err < 0)
		goto out;

	dev->index = err;
	dev_set_name(&dev->dev, "virtio%u", dev->index);

	spin_lock_init(&dev->config_lock);
	dev->config_enabled = false;
	dev->config_change_pending = false;

	/* We always start by resetting the device, in case a previous
	 * driver messed it up.  This also tests that code path a little. */
	dev->config->reset(dev);

	/* Acknowledge that we've seen the device. */
	add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);

	INIT_LIST_HEAD(&dev->vqs);

	/* device_register() causes the bus infrastructure to look for a
	 * matching driver. */
	err = device_register(&dev->dev);
out:
	if (err)
		add_status(dev, VIRTIO_CONFIG_S_FAILED);
	return err;
}

The virtio devices created in the system are:

root@am57xx-evm:~# ls /sys/bus/virtio/devices/   
virtio0  virtio1  virtio2  virtio3  virtio4
root@am57xx-evm:~#
root@am57xx-evm:~# ls -l /sys/bus/virtio/devices/virtio0/
-r--r--r--    1 root     root          4096 Oct  3 21:14 device
lrwxrwxrwx    1 root     root             0 Oct  3 21:14 driver -> ../../../../../../bus/virtio/drivers/virtio_rpmsg_bus
-r--r--r--    1 root     root          4096 Oct  3 21:14 features
-r--r--r--    1 root     root          4096 Oct  3 21:14 modalias
drwxr-xr-x    2 root     root             0 Oct  3 21:14 power
drwxr-xr-x    3 root     root             0 Oct  3 21:11 rpmsg5
-r--r--r--    1 root     root          4096 Oct  3 21:14 status
lrwxrwxrwx    1 root     root             0 Oct  3 21:11 subsystem -> ../../../../../../bus/virtio
-rw-r--r--    1 root     root          4096 Oct  3 21:11 uevent
-r--r--r--    1 root     root          4096 Oct  3 21:14 vendor
root@am57xx-evm:~# cat /sys/bus/virtio/devices/virtio0/modalias 
virtio:d00000007v00000000

2.2 virtio driver

The corresponding virtio driver is defined in drivers\rpmsg\virtio_rpmsg_bus.c:

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_RPMSG, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_RPMSG_F_NS,
};

static struct virtio_driver virtio_ipc_driver = {
	.feature_table	= features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name	= KBUILD_MODNAME,
	.driver.owner	= THIS_MODULE,
	.id_table	= id_table,
	.probe		= rpmsg_probe,
	.remove		= rpmsg_remove,
};
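
The virtio bus binds drivers to devices purely through this id_table. Roughly, the match rule (implemented in drivers/virtio/virtio.c) boils down to the sketch below, which is why a device registered with vdev->id.device = VIRTIO_ID_RPMSG ends up bound to virtio_ipc_driver:

/* Sketch of the virtio bus match rule: an id_table entry matches when both
 * the device id and the vendor id match (or are wildcarded). */
static int virtio_id_match(const struct virtio_device *dev,
			   const struct virtio_device_id *id)
{
	if (id->device != dev->id.device && id->device != VIRTIO_DEV_ANY_ID)
		return 0;

	return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor;
}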

After a virtio device whose vdev->id.device is VIRTIO_ID_RPMSG has been registered, the probe() function of the matching virtio driver is called:

static int rpmsg_probe(struct virtio_device *vdev)
{
	vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
	static const char * const names[] = { "input", "output" };
	struct virtqueue *vqs[2];
	struct virtproc_info *vrp;
	void *bufs_va;
	int err = 0, i;
	size_t total_buf_space;
	bool notify;

	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
	if (!vrp)
		return -ENOMEM;

	vrp->vdev = vdev;

	idr_init(&vrp->endpoints);
	mutex_init(&vrp->endpoints_lock);
	mutex_init(&vrp->tx_lock);
	init_waitqueue_head(&vrp->sendq);

	/* We expect two virtqueues, rx and tx (and in this order) */
    /* (1) Find two virtqueues in the vdev, named { "input", "output" }, with callbacks set to { rpmsg_recv_done, rpmsg_xmit_done }:
            the receive queue: vrp->rvq = vqs[0];
            the send queue:    vrp->svq = vqs[1];
     */
	err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names);
	if (err)
		goto free_vrp;

	vrp->rvq = vqs[0];
	vrp->svq = vqs[1];

	/* we expect symmetric tx/rx vrings */
	WARN_ON(virtqueue_get_vring_size(vrp->rvq) !=
		virtqueue_get_vring_size(vrp->svq));

	/* we need less buffers if vrings are small */
    /* (2) Compute the amount of memory needed by the rx & tx queues */
	if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2)
		vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
	else
		vrp->num_bufs = MAX_RPMSG_NUM_BUFS;

	total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;

	/* allocate coherent memory for the buffers */
    /* (3) Allocate the rx & tx buffer memory from DMA-coherent memory */
	bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
				     total_buf_space, &vrp->bufs_dma,
				     GFP_KERNEL);
	if (!bufs_va) {
		err = -ENOMEM;
		goto vqs_del;
	}

	dev_dbg(&vdev->dev, "buffers: va %p, dma %pad\n", bufs_va,
		&vrp->bufs_dma);

	/* half of the buffers is dedicated for RX */
	vrp->rbufs = bufs_va;

	/* and half is dedicated for TX */
	vrp->sbufs = bufs_va + total_buf_space / 2;

	/* set up the receive buffers */
    /* (4) Set up the buffer addresses of the receive queue */
	for (i = 0; i < vrp->num_bufs / 2; i++) {
		struct scatterlist sg;
		void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;

		rpmsg_sg_init_one(vrp, &sg, cpu_addr, RPMSG_BUF_SIZE);

		err = virtqueue_add_inbuf_rpmsg(vrp->rvq, &sg, 1, cpu_addr,
						GFP_KERNEL);
		WARN_ON(err); /* sanity check; this can't really happen */
	}

	/* suppress "tx-complete" interrupts */
	virtqueue_disable_cb(vrp->svq);

	vdev->priv = vrp;

	/* if supported by the remote processor, enable the name service */
    /* (5) If the virtio device supports the name service, create the corresponding endpoint on top of the queues */
	if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
		/* a dedicated endpoint handles the name service msgs */
		vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
						vrp, RPMSG_NS_ADDR);
		if (!vrp->ns_ept) {
			dev_err(&vdev->dev, "failed to create the ns ept\n");
			err = -ENOMEM;
			goto free_coherent;
		}
	}

	/*
	 * Prepare to kick but don't notify yet - we can't do this before
	 * device is ready.
	 */
    
	notify = virtqueue_kick_prepare(vrp->rvq);

	/* From this point on, we can notify and get callbacks. */
	virtio_device_ready(vdev);

	/* tell the remote processor it can start sending messages */
	/*
	 * this might be concurrent with callbacks, but we are only
	 * doing notify, not a full kick here, so that's ok.
	 */
    /* (6)  */
	if (notify)
		virtqueue_notify(vrp->rvq);

    /* (7) Corresponding log output:
        [   14.054702] virtio_rpmsg_bus virtio0: rpmsg host is online
     */
	dev_info(&vdev->dev, "rpmsg host is online\n");

	return 0;

free_coherent:
	dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
			  bufs_va, vrp->bufs_dma);
vqs_del:
	vdev->config->del_vqs(vrp->vdev);
free_vrp:
	kfree(vrp);
	return err;
}

2.2.1 rproc_boot()

An important piece of work is done implicitly during probe(): the firmware is loaded into the DSP and the DSP is started running. The core of this is implemented in rproc_boot().

rpmsg_probe() -> rproc_virtio_config_ops -> rproc_virtio_find_vqs() -> rproc_boot():

static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
				 struct virtqueue *vqs[],
				 vq_callback_t *callbacks[],
				 const char * const names[])
{
	struct rproc *rproc = vdev_to_rproc(vdev);
	int i, ret;

    /* (1.1) Based on rvdev->vring[i], allocate the queue's descriptor memory and create the new vqs[i] structure */
	for (i = 0; i < nvqs; ++i) {
		vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i]);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			goto error;
		}
	}

	/* now that the vqs are all set, boot the remote processor */
    /* (1.2) The queues are ready; now boot the remote processor */
	ret = rproc_boot(rproc);
	if (ret) {
		dev_err(&rproc->dev, "rproc_boot() failed %d\n", ret);
		goto error;
	}

	return 0;

error:
	__rproc_virtio_del_vqs(vdev);
	return ret;
}

↓

static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
				    unsigned id,
				    void (*callback)(struct virtqueue *vq),
				    const char *name)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct rproc *rproc = vdev_to_rproc(vdev);
	struct device *dev = &rproc->dev;
	struct rproc_vring *rvring;
	struct virtqueue *vq;
	void *addr;
	int len, size, ret;

	/* we're temporarily limited to two virtqueues per rvdev */
	if (id >= ARRAY_SIZE(rvdev->vring))
		return ERR_PTR(-EINVAL);

	if (!name)
		return NULL;

    /* (1.1.1) According to the configuration in rvdev->vring[i]:
            rvdev->vring[i]->len        // the number of messages in the queue
            rvdev->vring[i]->align      // the alignment of the message descriptors
        allocate the vring descriptor/control structures:
            rvdev->vring[i]->va = va;   // the allocated virtual address
            rvdev->vring[i]->dma = dma; // the allocated physical address
     */
	ret = rproc_alloc_vring(rvdev, id);
	if (ret)
		return ERR_PTR(ret);

	rvring = &rvdev->vring[id];
	addr = rvring->va;
	len = rvring->len;

	/* zero vring */
	size = vring_size(len, rvring->align);
	memset(addr, 0, size);

	dev_dbg(dev, "vring%d: va %p qsz %d notifyid %d\n",
		id, addr, len, rvring->notifyid);

	/*
	 * Create the new vq, and tell virtio we're not interested in
	 * the 'weak' smp barriers, since we're talking with a real device.
	 */
    /* (1.1.2) Create the new virtqueue structure, which references and points at rvdev->vring[i] */
	vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, addr,
				 rproc_virtio_notify, callback, name);
	if (!vq) {
		dev_err(dev, "vring_new_virtqueue %s failed\n", name);
		rproc_free_vring(rvring);
		return ERR_PTR(-ENOMEM);
	}

	rvring->vq = vq;
	vq->priv = rvring;

	return vq;
}

Next we analyze the most important part: the boot sequence of the remote processor:

int rproc_boot(struct rproc *rproc)
{
	const struct firmware *firmware_p = NULL;
	struct device *dev;
	int ret;

	if (!rproc) {
		pr_err("invalid rproc handle\n");
		return -EINVAL;
	}

	dev = &rproc->dev;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return ret;
	}

	/* loading a firmware is required */
	if (!rproc->firmware) {
		dev_err(dev, "%s: no firmware to load\n", __func__);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->parent->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/* skip the boot process if rproc is already powered up */
	if (atomic_inc_return(&rproc->power) > 1) {
		ret = 0;
		goto unlock_mutex;
	}

    /* (1.2.1) */
	dev_info(dev, "powering up %s\n", rproc->name);

	if (!rproc->use_userspace_loader) {
		/* load firmware */
        /* (1.2.2) Load the firmware image into memory */
		ret = request_firmware(&firmware_p, rproc->firmware, dev);
		if (ret < 0) {
			dev_err(dev, "request_firmware failed: %d\n", ret);
			goto downref_rproc;
		}
	}

    /* (1.2.3) Boot using the firmware image */
	ret = rproc_fw_boot(rproc, firmware_p);

	if (!rproc->use_userspace_loader)
		release_firmware(firmware_p);

downref_rproc:
	if (ret) {
		module_put(dev->parent->driver->owner);
		atomic_dec(&rproc->power);
	}
unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}

↓

static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
{
	struct device *dev = &rproc->dev;
	const char *name = rproc->firmware;
	struct resource_table *table, *loaded_table;
	int ret, tablesz, versz;
	const char *version;

	if (!rproc->table_ptr)
		return -ENOMEM;

    /* (1.2.3.1) Sanity-check the firmware image */
	ret = rproc_fw_sanity_check(rproc, fw);
	if (ret)
		return ret;

	if (!rproc->use_userspace_loader)
		dev_info(dev, "Booting fw image %s, size %zd\n",
			 name, fw->size);
	else
		dev_info(dev, "Booting unspecified pre-loaded fw image\n");

	/*
	 * if enabling an IOMMU isn't relevant for this rproc, this is
	 * just a nop
	 */
    /* (1.2.3.2) Enable the IOMMU */
	ret = rproc_enable_iommu(rproc);
	if (ret) {
		dev_err(dev, "can't enable iommu: %d\n", ret);
		return ret;
	}

    /* (1.2.3.3) Get the boot address from the firmware */
	rproc->bootaddr = rproc_get_boot_addr(rproc, fw);
	ret = -EINVAL;

	/* look for the resource table */
    /* (1.2.3.4) Locate the resource table in the firmware */
	table = rproc_find_rsc_table(rproc, fw, &tablesz);
	if (!table)
		goto clean_up;

	/* Verify that resource table in loaded fw is unchanged */
	if (rproc->table_csum != crc32(0, table, tablesz)) {
		dev_err(dev, "resource checksum failed, fw changed?\n");
		goto clean_up;
	}

	/* handle fw resources which are required to boot rproc */
    /* (1.2.3.4.1)
            For RSC_CARVEOUT resources, allocate DMA memory and set up the IOMMU mapping.
            For RSC_DEVMEM resources, set up the IOMMU mapping directly.
            For RSC_TRACE resources, create the corresponding trace file.
     */
	ret = rproc_handle_resources(rproc, tablesz, rproc_loading_handlers);
	if (ret) {
		dev_err(dev, "Failed to process resources: %d\n", ret);
		goto clean_up;
	}

	/* look for the firmware version, and store if present */
    /* (1.2.3.5) Handle the version information in the firmware:
            store it in rproc->fw_version
     */
	version = rproc_find_version_info(rproc, fw, &versz);
	if (version) {
		ret = rproc_handle_fw_version(rproc, version, versz);
		if (ret) {
			dev_err(dev, "Failed to process version info: %d\n",
				ret);
			goto clean_up;
		}
	}

    /* (1.2.3.6) Handle the segments in the firmware:
            copy the ELF segments into the corresponding oproc->mem[i] regions
     */
	if (!rproc->use_userspace_loader) {
		/* load the ELF segments to memory */
		ret = rproc_load_segments(rproc, fw);
		if (ret) {
			dev_err(dev, "Failed to load program segments: %d\n",
				ret);
			goto clean_up;
		}
	}

	/*
	 * The starting device has been given the rproc->cached_table as the
	 * resource table. The address of the vring along with the other
	 * allocated resources (carveouts etc) is stored in cached_table.
	 * In order to pass this information to the remote device we must
	 * copy this information to device memory.
	 */
    /* (1.2.3.7) Handle the loaded resource table in device memory:
            RSC_CUSTOM-type resources get their custom handling here
     */
	loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
	if (!loaded_table) {
		ret = -EINVAL;
		goto clean_up;
	}

	memcpy(loaded_table, rproc->cached_table, tablesz);

	/* handle fw resources which require fw segments to be loaded*/
	ret = rproc_handle_resources(rproc, tablesz,
				     rproc_post_loading_handlers);
	if (ret) {
		dev_err(dev, "Failed to process post-loading resources: %d\n",
			ret);
		goto clean_up;
	}

	/* power up the remote processor */
    /* (1.2.3.8) Power up the DSP */
	ret = rproc->ops->start(rproc);
	if (ret) {
		dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
		goto clean_up;
	}

	/*
	 * Update table_ptr so that all subsequent vring allocations and
	 * virtio fields manipulation update the actual loaded resource table
	 * in device memory.
	 */
	rproc->table_ptr = loaded_table;

	rproc->state = RPROC_RUNNING;

    /* (1.2.3.9)  */
	dev_info(dev, "remote processor %s is now up\n", rproc->name);

	return 0;

clean_up:
	rproc_resource_cleanup(rproc);
	rproc_disable_iommu(rproc);
	return ret;
}

rproc->ops->start() -> omap_rproc_ops -> omap_rproc_start():

static struct rproc_ops omap_rproc_ops = {
	.start		= omap_rproc_start,
	.stop		= omap_rproc_stop,
	.kick		= omap_rproc_kick,
	.da_to_va	= omap_rproc_da_to_va,
};

↓

static int omap_rproc_start(struct rproc *rproc)
{
	struct omap_rproc *oproc = rproc->priv;
	struct device *dev = rproc->dev.parent;
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
	int ret;
	struct mbox_client *client = &oproc->client;

    /* (1.2.3.8.1) Configure the boot address:
            write the firmware's boot address into the boot register
     */
	if (oproc->boot_data)
		omap_rproc_write_dsp_boot_addr(rproc);

	client->dev = dev;
	client->tx_done = NULL;
	client->rx_callback = omap_rproc_mbox_callback;
	client->tx_block = false;
	client->knows_txdone = false;

    /* (1.2.3.8.2) Configure the related mailbox:
            request a channel from the DSP's mailbox
            and send a ping request
     */
	oproc->mbox = mbox_request_channel(client, 0);
	if (IS_ERR(oproc->mbox)) {
		ret = -EBUSY;
		dev_err(dev, "mbox_request_channel failed: %ld\n",
			PTR_ERR(oproc->mbox));
		return ret;
	}

	/*
	 * Ping the remote processor. this is only for sanity-sake;
	 * there is no functional effect whatsoever.
	 *
	 * Note that the reply will _not_ arrive immediately: this message
	 * will wait in the mailbox fifo until the remote processor is booted.
	 */
	ret = mbox_send_message(oproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
	if (ret < 0) {
		dev_err(dev, "mbox_send_message failed: %d\n", ret);
		goto put_mbox;
	}

    /* (1.2.3.8.3) Enable the related timers:
            configure the clock sources and initialize the "timers" and "watchdog-timers"   // timer_ops->request_timer(np) -> omap_rproc_request_timer
            register the timeout interrupt for the "watchdog-timers" // request_irq(timers[i].irq, omap_rproc_watchdog_isr)
     */
	ret = omap_rproc_enable_timers(pdev, true);
	if (ret) {
		dev_err(dev, "omap_rproc_enable_timers failed: %d\n", ret);
		goto put_mbox;
	}

    /* (1.2.3.8.4) Call the enable hook from the platform_data:
            this ultimately goes through the hwmod layer
     */
	ret = pdata->device_enable(pdev);
	if (ret) {
		dev_err(dev, "omap_device_enable failed: %d\n", ret);
		goto reset_timers;
	}

	/*
	 * remote processor is up, so update the runtime pm status and
	 * enable the auto-suspend. The device usage count is incremented
	 * manually for balancing it for auto-suspend
	 */
    /* (1.2.3.8.5) Call the pm_runtime functions:
            these also ultimately go through the hwmod layer
     */
	pm_runtime_set_active(dev);
	pm_runtime_set_autosuspend_delay(dev, oproc->autosuspend_delay);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_get_noresume(dev);
	pm_runtime_enable(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;

reset_timers:
	omap_rproc_disable_timers(pdev, true);
put_mbox:
	mbox_free_channel(oproc->mbox);
	return ret;
}

2.2.2 omap hwmod

rpmsg_probe() -> rproc_virtio_config_ops -> rproc_virtio_find_vqs() -> rproc_boot() -> rproc_fw_boot() -> rproc->ops->start() -> omap_rproc_ops -> omap_rproc_start() -> pdata->device_enable() -> omap_rproc_device_enable():

int omap_rproc_device_enable(struct platform_device *pdev)
{
	int ret = -EINVAL;

	/*
	 * This reset management follows a device name check to differentiate
	 * DSP and IPU processor subsystems. This check is weak and is ok for
	 * now because of the dependencies against the pdata-quirks, where
	 * the devices are given specific device names that satisfy the
	 * criteria for the check. It can easily be replaced with a stronger
	 * check like device node compatibility check, if needed.
	 */
    /* (1.2.3.8.4.1) Deassert the "dsp" reset */
	if (strstr(dev_name(&pdev->dev), "dsp")) {
		ret = omap_device_deassert_hardreset(pdev, "dsp");
		if (ret)
			goto out;
	} else if (strstr(dev_name(&pdev->dev), "ipu")) {
		ret = omap_device_deassert_hardreset(pdev, "cpu0");
		if (ret)
			goto out;

		ret = omap_device_deassert_hardreset(pdev, "cpu1");
		if (ret)
			goto out;
	} else {
		pr_err("unsupported remoteproc\n");
		goto out;
	}

    /* (1.2.3.8.4.2) Enable the corresponding device */
	ret = omap_device_enable(pdev);

out:
	if (ret)
		pr_err("failed for proc %s\n", dev_name(&pdev->dev));
	return ret;
}

The key to the operations above: each one maps from the platform_device to its corresponding omap_device, and the hardware is then driven through omap_device.hwmods[].

2.2.2.1 omap_device

So how does a platform_device turn into an omap_device?

The OMAP code installs a bus-notifier hook: when a platform_device is registered and its node contains a "ti,hwmods" property, a corresponding omap_device is created in addition.

arch\arm\mach-omap2\omap_device.c:

static struct notifier_block platform_nb = {
	.notifier_call = _omap_device_notifier_call,
};

static int __init omap_device_init(void)
{
	omap_hwmod_setup_reidle();
	bus_register_notifier(&platform_bus_type, &platform_nb);
	return 0;
}
omap_core_initcall(omap_device_init);

↓

static int _omap_device_notifier_call(struct notifier_block *nb,
				      unsigned long event, void *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_device *od;

	switch (event) {
    ...
	case BUS_NOTIFY_ADD_DEVICE:
		if (pdev->dev.of_node)
			omap_device_build_from_dt(pdev);
		omap_auxdata_legacy_init(dev);
		/* fall through */
	default:
		od = to_omap_device(pdev);
		if (od)
			od->_driver_status = event;
	}

	return NOTIFY_DONE;
}

↓

static int omap_device_build_from_dt(struct platform_device *pdev)
{
	struct omap_hwmod **hwmods;
	struct omap_device *od;
	struct omap_hwmod *oh;
	struct device_node *node = pdev->dev.of_node;
	const char *oh_name;
	int oh_cnt, i, ret = 0;
	bool device_active = false;

    /* (1) Check whether the device's dts node has a "ti,hwmods" property; if so, create an omap_device */
	oh_cnt = of_property_count_strings(node, "ti,hwmods");
	if (oh_cnt <= 0) {
		dev_dbg(&pdev->dev, "No 'hwmods' to build omap_device\n");
		return -ENODEV;
	}

	hwmods = kzalloc(sizeof(struct omap_hwmod *) * oh_cnt, GFP_KERNEL);
	if (!hwmods) {
		ret = -ENOMEM;
		goto odbfd_exit;
	}

    /* (2) 根據"ti,hwmods"屬性的名稱,在hwmod全局鏈表中查找對應的omap_hwmod對象
            例如dsp中"ti,hwmods"屬性的定義:
                ti,hwmods = "dsp1";
            例如mailbox中"ti,hwmods"屬性的定義:
                ti,hwmods = "mailbox5";
     */
	for (i = 0; i < oh_cnt; i++) {
		of_property_read_string_index(node, "ti,hwmods", i, &oh_name);
		oh = omap_hwmod_lookup(oh_name);
		if (!oh) {
			dev_err(&pdev->dev, "Cannot lookup hwmod '%s'\n",
				oh_name);
			ret = -EINVAL;
			goto odbfd_exit1;
		}
		hwmods[i] = oh;
		if (oh->flags & HWMOD_INIT_NO_IDLE)
			device_active = true;
	}

    /* (3) Create the omap_device corresponding to the platform_device */
	od = omap_device_alloc(pdev, hwmods, oh_cnt);
	if (IS_ERR(od)) {
		dev_err(&pdev->dev, "Cannot allocate omap_device for :%s\n",
			oh_name);
		ret = PTR_ERR(od);
		goto odbfd_exit1;
	}

	/* Fix up missing resource names */
    /* (4) */
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(&pdev->dev);
	}

    /* (5) */
	pdev->dev.pm_domain = &omap_device_pm_domain;

	if (device_active) {
		omap_device_enable(pdev);
		pm_runtime_set_active(&pdev->dev);
	}

odbfd_exit1:
	kfree(hwmods);
odbfd_exit:
	/* if data/we are at fault.. load up a fail handler */
	if (ret)
		pdev->dev.pm_domain = &omap_device_fail_pm_domain;

	return ret;
}
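
The omap_device allocated in step (3) is essentially a thin wrapper that ties the platform_device to the omap_hwmod objects it controls. Roughly (see arch/arm/mach-omap2/omap_device.h; shown only for orientation, field order may differ slightly):

/* Sketch of the wrapper that links a platform_device to its hwmods. */
struct omap_device {
	struct platform_device	*pdev;		/* the wrapped platform device   */
	struct omap_hwmod	**hwmods;	/* e.g. the "dsp1" hwmod         */
	unsigned long		_driver_status;
	u8			hwmods_cnt;
	u8			_state;
	u8			flags;
};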

2.2.2.2 The global hwmod list (omap_hwmod_list)

A key step above was looking up the omap_hwmod object in the global hwmod list by the name given in the "ti,hwmods" property.

So when is the global hwmod list (omap_hwmod_list) populated?

arch\arm\mach-omap2\omap_hwmod_7xx_data.c:

int __init dra7xx_hwmod_init(void)
{
	int ret;

	omap_hwmod_init();
	ret = omap_hwmod_register_links(dra7xx_hwmod_ocp_ifs);

	if (!ret && soc_is_dra74x())
		ret = omap_hwmod_register_links(dra74x_hwmod_ocp_ifs);
	else if (!ret && soc_is_dra72x())
		ret = omap_hwmod_register_links(dra72x_hwmod_ocp_ifs);

	if (!ret && omap_type() == OMAP2_DEVICE_TYPE_GP)
		ret = omap_hwmod_register_links(dra7xx_gp_hwmod_ocp_ifs);

	return ret;
}

↓

void __init omap_hwmod_init(void)
{
	if (cpu_is_omap24xx()) {
		soc_ops.wait_target_ready = _omap2xxx_3xxx_wait_target_ready;
		soc_ops.assert_hardreset = _omap2_assert_hardreset;
		soc_ops.deassert_hardreset = _omap2_deassert_hardreset;
		soc_ops.is_hardreset_asserted = _omap2_is_hardreset_asserted;
	} else if (cpu_is_omap34xx()) {
		soc_ops.wait_target_ready = _omap2xxx_3xxx_wait_target_ready;
		soc_ops.assert_hardreset = _omap2_assert_hardreset;
		soc_ops.deassert_hardreset = _omap2_deassert_hardreset;
		soc_ops.is_hardreset_asserted = _omap2_is_hardreset_asserted;
		soc_ops.init_clkdm = _init_clkdm;
	} else if (cpu_is_omap44xx() || soc_is_omap54xx() || soc_is_dra7xx()) {
		soc_ops.enable_module = _omap4_enable_module;
		soc_ops.disable_module = _omap4_disable_module;
		soc_ops.wait_target_ready = _omap4_wait_target_ready;
		soc_ops.assert_hardreset = _omap4_assert_hardreset;
		soc_ops.deassert_hardreset = _omap4_deassert_hardreset;
		soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted;
		soc_ops.init_clkdm = _init_clkdm;
		soc_ops.update_context_lost = _omap4_update_context_lost;
		soc_ops.get_context_lost = _omap4_get_context_lost;
	} else if (cpu_is_ti814x() || cpu_is_ti816x() || soc_is_am33xx() ||
		   soc_is_am43xx()) {
		soc_ops.enable_module = _omap4_enable_module;
		soc_ops.disable_module = _omap4_disable_module;
		soc_ops.wait_target_ready = _omap4_wait_target_ready;
		soc_ops.assert_hardreset = _omap4_assert_hardreset;
		soc_ops.deassert_hardreset = _am33xx_deassert_hardreset;
		soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted;
		soc_ops.init_clkdm = _init_clkdm;
	} else {
		WARN(1, "omap_hwmod: unknown SoC type\n");
	}

	inited = true;
}

The hwmod entries corresponding to the DSPs are:

/* dsp1 processor */
static struct omap_hwmod dra7xx_dsp1_hwmod = {
	.name		= "dsp1",
	.class		= &dra7xx_dsp_hwmod_class,
	.clkdm_name	= "dsp1_clkdm",
	.rst_lines	= dra7xx_dsp_resets,
	.rst_lines_cnt	= ARRAY_SIZE(dra7xx_dsp_resets),
	.main_clk	= "dpll_dsp_m2_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DRA7XX_CM_DSP1_DSP1_CLKCTRL_OFFSET,
			.rstctrl_offs = DRA7XX_RM_DSP1_RSTCTRL_OFFSET,
			.context_offs = DRA7XX_RM_DSP1_DSP1_CONTEXT_OFFSET,
		},
	},
};

/* dsp2 processor */
static struct omap_hwmod dra7xx_dsp2_hwmod = {
	.name		= "dsp2",
	.class		= &dra7xx_dsp_hwmod_class,
	.clkdm_name	= "dsp2_clkdm",
	.rst_lines	= dra7xx_dsp_resets,
	.rst_lines_cnt	= ARRAY_SIZE(dra7xx_dsp_resets),
	.main_clk	= "dpll_dsp_m2_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DRA7XX_CM_DSP2_DSP2_CLKCTRL_OFFSET,
			.rstctrl_offs = DRA7XX_RM_DSP2_RSTCTRL_OFFSET,
			.context_offs = DRA7XX_RM_DSP2_DSP2_CONTEXT_OFFSET,
		},
	},
};

2.2.3 mbox

The DSP boot sequence also makes use of the mailbox:

rpmsg_probe() -> rproc_virtio_config_ops -> rproc_virtio_find_vqs() -> rproc_boot() -> rproc_fw_boot() -> rproc->ops->start() -> omap_rproc_ops -> omap_rproc_start() -> mbox_request_channel()

struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
{
	struct device *dev = cl->dev;
	struct mbox_controller *mbox;
	struct of_phandle_args spec;
	struct mbox_chan *chan;
	unsigned long flags;
	int ret;

	if (!dev || !dev->of_node) {
		pr_debug("%s: No owner device node\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&con_mutex);
    
    /* (1.2.3.8.2.1) Parse the "mboxes" property of the remote-processor node to find the mailbox specifier.
            For example, the dsp node defines:
                mboxes = <&mailbox5 &mbox_dsp1_ipc3x>;
     */
	if (of_parse_phandle_with_args(dev->of_node, "mboxes",
				       "#mbox-cells", index, &spec)) {
		dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
		mutex_unlock(&con_mutex);
		return ERR_PTR(-ENODEV);
	}

	chan = ERR_PTR(-EPROBE_DEFER);

    /* (1.2.3.8.2.2) Use the specifier to find the matching mbox controller */
	list_for_each_entry(mbox, &mbox_cons, node)
		if (mbox->dev->of_node == spec.np) {
            /* (1.2.3.8.2.3) Allocate a new channel from the mbox controller */
			chan = mbox->of_xlate(mbox, &spec);
			break;
		}

	of_node_put(spec.np);

	if (IS_ERR(chan)) {
		mutex_unlock(&con_mutex);
		return chan;
	}

	if (chan->cl || !try_module_get(mbox->dev->driver->owner)) {
		dev_dbg(dev, "%s: mailbox not free\n", __func__);
		mutex_unlock(&con_mutex);
		return ERR_PTR(-EBUSY);
	}

    /* (1.2.3.8.2.4) Initialize the new channel from the client parameters */
	spin_lock_irqsave(&chan->lock, flags);
	chan->msg_free = 0;
	chan->msg_count = 0;
	chan->active_req = NULL;
	chan->cl = cl;
	init_completion(&chan->tx_complete);

	if (chan->txdone_method	== TXDONE_BY_POLL && cl->knows_txdone)
		chan->txdone_method |= TXDONE_BY_ACK;

	spin_unlock_irqrestore(&chan->lock, flags);

    /* (1.2.3.8.2.5) Start up the new channel */
	ret = chan->mbox->ops->startup(chan);
	if (ret) {
		dev_err(dev, "Unable to startup the chan (%d)\n", ret);
		mbox_free_channel(chan);
		chan = ERR_PTR(ret);
	}

	mutex_unlock(&con_mutex);
	return chan;
}

So when are these referenced mailboxes created?

2.2.3.1 platform device

The dsp node references the mailbox in the dts:

&dsp1 {
	status = "okay";
	memory-region = <&dsp1_cma_pool>;
	mboxes = <&mailbox5 &mbox_dsp1_ipc3x>;
	timers = <&timer5>;
};

The mailbox platform device is defined in the dts:

mailbox5: mailbox@48840000 {
	compatible = "ti,omap4-mailbox";
	reg = <0x48840000 0x200>;
	interrupts = <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>;
	ti,hwmods = "mailbox5";
	#mbox-cells = <1>;
	ti,mbox-num-users = <4>;
	ti,mbox-num-fifos = <12>;
	status = "disabled";
};

&mailbox5 {
	mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
		ti,mbox-tx = <6 2 2>;
		ti,mbox-rx = <4 2 2>;
		status = "disabled";
	};
	mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
		ti,mbox-tx = <5 2 2>;
		ti,mbox-rx = <1 2 2>;
		status = "disabled";
	};
};

&mailbox5 {
	status = "okay";
	mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
		status = "okay";
	};
	mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
		status = "okay";
	};
};

2.2.3.2 platform driver

The corresponding platform driver is registered in:

drivers\mailbox\omap-mailbox.c:

static struct platform_driver omap_mbox_driver = {
	.probe	= omap_mbox_probe,
	.remove	= omap_mbox_remove,
	.driver	= {
		.name = "omap-mailbox",
		.pm = &omap_mbox_pm_ops,
		.of_match_table = of_match_ptr(omap_mailbox_of_match),
	},
};

static const struct of_device_id omap_mailbox_of_match[] = {
	{
		.compatible	= "ti,omap2-mailbox",
		.data		= (void *)MBOX_INTR_CFG_TYPE1,
	},
	{
		.compatible	= "ti,omap3-mailbox",
		.data		= (void *)MBOX_INTR_CFG_TYPE1,
	},
	{
		.compatible	= "ti,omap4-mailbox",
		.data		= (void *)MBOX_INTR_CFG_TYPE2,
	},
	{
		/* end */
	},
};

2.2.4 iommu

The DSP boot sequence also performs extensive IOMMU operations.

2.2.4.1 platform device

The dsp node references the IOMMUs in the dts:

		dsp1: dsp@40800000 {
			compatible = "ti,dra7-dsp";
			reg = <0x40800000 0x48000>,
			      <0x40e00000 0x8000>,
			      <0x40f00000 0x8000>;
			reg-names = "l2ram", "l1pram", "l1dram";
			ti,hwmods = "dsp1";
			syscon-bootreg = <&scm_conf 0x55c>;
			iommus = <&mmu0_dsp1>, <&mmu1_dsp1>;
			ti,rproc-standby-info = <0x4a005420>;
			status = "disabled";
		};

The IOMMU platform devices are defined in the dts:

		mmu0_dsp1: mmu@40d01000 {
			compatible = "ti,dra7-dsp-iommu";
			reg = <0x40d01000 0x100>;
			interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
			ti,hwmods = "mmu0_dsp1";
			#iommu-cells = <0>;
			ti,syscon-mmuconfig = <&dsp1_system 0x0>;
			status = "disabled";
		};

		mmu1_dsp1: mmu@40d02000 {
			compatible = "ti,dra7-dsp-iommu";
			reg = <0x40d02000 0x100>;
			interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
			ti,hwmods = "mmu1_dsp1";
			#iommu-cells = <0>;
			ti,syscon-mmuconfig = <&dsp1_system 0x1>;
			status = "disabled";
		};

2.2.4.2 platform driver

The corresponding platform driver is registered in:

drivers\iommu\omap-iommu.c:

static const struct of_device_id omap_iommu_of_match[] = {
	{ .compatible = "ti,omap2-iommu" },
	{ .compatible = "ti,omap4-iommu" },
	{ .compatible = "ti,dra7-iommu"	},
	{ .compatible = "ti,dra7-dsp-iommu" },
	{},
};

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= omap_iommu_remove,
	.driver	= {
		.name	= "omap-iommu",
		.pm	= &omap_iommu_pm_ops,
		.of_match_table = of_match_ptr(omap_iommu_of_match),
	},
};

static const struct dev_pm_ops omap_iommu_pm_ops = {
	.prepare = omap_iommu_prepare,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend,
			   omap_iommu_runtime_resume, NULL)
};

2.2.5 pm_runtime

The remoteproc driver defines the PM-related callbacks:

static const struct dev_pm_ops omap_rproc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_rproc_suspend, omap_rproc_resume)
	SET_RUNTIME_PM_OPS(omap_rproc_runtime_suspend,
			   omap_rproc_runtime_resume, NULL)
};

#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
	.suspend = suspend_fn, \
	.resume = resume_fn, \
	.freeze = suspend_fn, \
	.thaw = resume_fn, \
	.poweroff = suspend_fn, \
	.restore = resume_fn,

#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	.runtime_suspend = suspend_fn, \
	.runtime_resume = resume_fn, \
	.runtime_idle = idle_fn,

omap_rproc_suspend() and omap_rproc_resume() are the ordinary system-sleep power-management callbacks;

omap_rproc_runtime_suspend() and omap_rproc_runtime_resume() are the runtime PM callbacks.

2.2.5.1 hwmod

Both paths eventually call _omap_rproc_suspend() and _omap_rproc_resume():

_omap_rproc_resume(), like omap_rproc_start(), ultimately operates on the hardware through the device's hwmod hook pdata->device_enable();

_omap_rproc_suspend(), like omap_rproc_stop(), ultimately does so through the hwmod hook pdata->device_shutdown().

static int _omap_rproc_resume(struct rproc *rproc, bool auto_suspend)
{
	struct device *dev = rproc->dev.parent;
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_rproc_pdata *pdata = dev_get_platdata(dev);
	struct omap_rproc *oproc = rproc->priv;
	int ret;

	/*
	 * IOMMUs would have to be enabled specifically for runtime resume.
	 * They would have been already enabled automatically through System
	 * PM callbacks for regular system resume
	 */
	if (auto_suspend) {
		ret = omap_iommu_domain_activate(rproc->domain);
		if (ret) {
			dev_err(dev, "omap_iommu activate failed %d\n", ret);
			goto out;
		}
	}

	/* boot address could be lost after suspend, so restore it */
	if (oproc->boot_data)
		omap_rproc_write_dsp_boot_addr(rproc);

	ret = omap_rproc_enable_timers(pdev, false);
	if (ret) {
		dev_err(dev, "enabling timers during resume failed %d\n",
			ret);
		goto suspend_iommu;
	}

    /* As in omap_rproc_start(), the device's hwmod hook ultimately does the work */
	ret = pdata->device_enable(pdev);
	if (ret)
		goto disable_timers;

	return 0;

disable_timers:
	omap_rproc_disable_timers(pdev, false);
suspend_iommu:
	if (auto_suspend)
		omap_iommu_domain_deactivate(rproc->domain);
out:
	return ret;
}

2.2.5.2 pm_runtime_get/put

The basic idea of pm_runtime is to add reference counting on top of the ordinary PM operations.

A device has four runtime PM states: powered up (RPM_ACTIVE), powered down (RPM_SUSPENDED), powering up (RPM_RESUMING), and powering down (RPM_SUSPENDING):

enum rpm_status {
	RPM_ACTIVE = 0,
	RPM_RESUMING,
	RPM_SUSPENDED,
	RPM_SUSPENDING,
};
  • A usage counter tracks how many other modules are using the device; a get increments it, a put decrements it:
dev->power.usage_count

If the device was previously in the RPM_SUSPENDED state and some module does a get, a resume operation powers the device up;

if the device was previously in the RPM_ACTIVE state and a put drops usage_count to 0, a suspend operation powers it down.

  • The device keeps another counter to track the usage of its child devices:
dev->power.child_count

When the dev->power.ignore_children flag is not set, the device must account for its children's power state: as long as any child device is active, the parent device must stay powered.

Only when both dev->power.usage_count and dev->power.child_count are 0 can the device actually be powered down.
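
A minimal sketch of this reference-counting pattern as a driver typically uses it (my_dev_do_work() is a hypothetical helper, not taken from omap_remoteproc.c):

#include <linux/pm_runtime.h>

static int my_dev_do_work(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* usage_count++, resumes the device if it was suspended */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the counter on failure */
		return ret;
	}

	/* ... access the hardware while it is guaranteed to be powered ... */

	pm_runtime_put(dev);			/* usage_count--, a suspend is requested once it reaches 0 */
	return 0;
}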

2.2.5.3 auto suspend

For a put operation, if the count has dropped to 0, the usual behaviour is to suspend (power down) the device immediately.

There is also a delayed power-down mode called auto suspend. Its main idea is to avoid the overhead of frequent power cycling: the suspend is delayed for a while, and if another get arrives within that window, no actual suspend -> resume cycle is needed at all.

The data members related to auto suspend are:

dev->power.suspend_timer        // the timer that performs the delayed suspend
dev->power.autosuspend_delay    // the length of the autosuspend delay
dev->power.timer_expires        // the time at which the timer expires
dev->power.use_autosuspend      // whether the autosuspend feature is enabled
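
Autosuspend is typically switched on once at start/probe time. A short sketch of that setup (the same calls appear in omap_rproc_start() quoted in 2.2.5.5 below; the 10000 ms value mirrors the delay used by this driver):

/* Sketch: enabling autosuspend on a device. */
pm_runtime_set_autosuspend_delay(dev, 10000);	/* wait 10 s of idleness before suspending */
pm_runtime_use_autosuspend(dev);		/* make *_put_autosuspend() honour that delay */
pm_runtime_enable(dev);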

Once autosuspend is enabled, adding the RPM_AUTO flag to a put operation gives the delayed-suspend behaviour:

static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
}

2.2.5.4 pm_runtime disable

pm_runtime also provides a master switch that turns the whole feature on or off.

The disable switch is itself a counter: a disable operation increments it, an enable operation decrements it:

dev->power.disable_depth

As long as this counter is greater than 0, no pm_runtime operations take effect.
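
In practice the counter is manipulated with pm_runtime_disable()/pm_runtime_enable(); a short sketch of the usual pairing (hypothetical context, not taken from omap_remoteproc.c):

pm_runtime_disable(dev);	/* disable_depth++: runtime suspend/resume callbacks are blocked */
/* ... reconfigure or stop the hardware without racing the runtime PM callbacks ... */
pm_runtime_enable(dev);		/* disable_depth--: runtime PM becomes active again when it reaches 0 */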

2.2.5.5 How remoteproc actually uses it

  • In its start() operation, remoteproc enables pm_runtime and puts the device onto the autosuspend path:
static int omap_rproc_start(struct rproc *rproc)
{
	...

	/*
	 * remote processor is up, so update the runtime pm status and
	 * enable the auto-suspend. The device usage count is incremented
	 * manually for balancing it for auto-suspend
	 */
	pm_runtime_set_active(dev);
	pm_runtime_set_autosuspend_delay(dev, oproc->autosuspend_delay);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_get_noresume(dev);      /* get first */
	pm_runtime_enable(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);   /* then put: the PM count is now 0, so a suspend runs after the autosuspend delay */

	...
}

If there is no communication, the remoteproc enters suspend after the autosuspend delay elapses;

  • if there is communication, a kick() operation is needed, which restarts the autosuspend timer:
static bool rproc_virtio_notify(struct virtqueue *vq)
{
	struct rproc_vring *rvring = vq->priv;
	struct rproc *rproc = rvring->rvdev->rproc;
	int notifyid = rvring->notifyid;

	dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);

	rproc->ops->kick(rproc, notifyid);
	return true;
}

↓

/* kick a virtqueue */
static void omap_rproc_kick(struct rproc *rproc, int vqid)
{
	struct omap_rproc *oproc = rproc->priv;
	struct device *dev = rproc->dev.parent;
	int ret;

	/* wake up the rproc before kicking it */
	ret = pm_runtime_get_sync(dev);    /* get again */
	if (WARN_ON(ret < 0)) {
		dev_err(dev, "pm_runtime_get_sync() failed during kick, ret = %d\n",
			ret);
		pm_runtime_put_noidle(dev);
		return;
	}

	/* send the index of the triggered virtqueue in the mailbox payload */
	/* tell the remote side via the mailbox that the vring has a message to receive */
	ret = mbox_send_message(oproc->mbox, (void *)vqid);
	if (ret < 0)
		dev_err(dev, "failed to send mailbox message, status = %d\n",
			ret);

	pm_runtime_mark_last_busy(dev);    /* refresh the last-busy timestamp */
	pm_runtime_put_autosuspend(dev);   /* put again; the count is back to 0 and a new auto-suspend period starts */
}
  • When the remote processor is shut down, the stop() operation is called:
static int omap_rproc_stop(struct rproc *rproc)
{
	...

	/*
	 * update the runtime pm states and status now that the remoteproc
	 * has stopped
	 */
	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_put_noidle(dev);
	pm_runtime_set_suspended(dev);

	...
}

3. rpmsg

Mechanically, the vring carries the actual bulk data, while the mailbox is only used to signal that the vring has data pending.

3.1 name service rpmsg channel

The DSP's platform driver creates the corresponding virtio device from the RSC_VDEV-type resource defined in the firmware;

virtio_ipc_driver matches virtio devices with id=VIRTIO_ID_RPMSG; in rpmsg_probe() it creates the two virtqueues, "tx" and "rx", according to the virtqueue definitions in that RSC_VDEV resource;

On top of the virtqueues, traffic can be subdivided further by creating channels.

During rpmsg_probe() initialization, if the VIRTIO_RPMSG_F_NS feature is advertised, a name service endpoint (handle) is created by default:

static int rpmsg_probe(struct virtio_device *vdev)
{
	...

	/* if supported by the remote processor, enable the name service */
	/* (5) if the virtio device supports name service, create a dedicated endpoint for it on top of the queues */
	if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
		/* a dedicated endpoint handles the name service msgs */
		vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
						vrp, RPMSG_NS_ADDR);
		if (!vrp->ns_ept) {
			dev_err(&vdev->dev, "failed to create the ns ept\n");
			err = -ENOMEM;
			goto free_coherent;
		}
	}

	...
}

When the queue receives a message addressed to RPMSG_NS_ADDR (the name service), the corresponding callback rpmsg_ns_cb() is invoked:

static void rpmsg_ns_cb(struct rpmsg_channel *rpdev, void *data, int len,
							void *priv, u32 src)
{
	struct rpmsg_ns_msg *msg = data;
	struct rpmsg_channel *newch;
	struct rpmsg_channel_info chinfo;
	struct virtproc_info *vrp = priv;
	struct device *dev = &vrp->vdev->dev;
	int ret;

#if defined(CONFIG_DYNAMIC_DEBUG)
	dynamic_hex_dump("NS announcement: ", DUMP_PREFIX_NONE, 16, 1,
			 data, len, true);
#endif

	if (len != sizeof(*msg)) {
		dev_err(dev, "malformed ns msg (%d)\n", len);
		return;
	}

	/*
	 * the name service ept does _not_ belong to a real rpmsg channel,
	 * and is handled by the rpmsg bus itself.
	 * for sanity reasons, make sure a valid rpdev has _not_ sneaked
	 * in somehow.
	 */
	if (rpdev) {
		dev_err(dev, "anomaly: ns ept has an rpdev handle\n");
		return;
	}

	/* don't trust the remote processor for null terminating the name */
	msg->name[RPMSG_NAME_SIZE - 1] = '\0';

	/* (5.1) after the DSP finishes booting, the last log line printed comes from here:
	 *   [   14.328946] virtio_rpmsg_bus virtio0: creating channel rpmsg-proto addr 0x3d
	 */
	dev_info(dev, "%sing channel %s addr 0x%x\n",
			msg->flags & RPMSG_NS_DESTROY ? "destroy" : "creat",
			msg->name, msg->addr);

	strncpy(chinfo.name, msg->name, sizeof(chinfo.name));
	strncpy(chinfo.desc, msg->desc, sizeof(chinfo.desc));
	chinfo.src = RPMSG_ADDR_ANY;
	chinfo.dst = msg->addr;

	/* (5.2) create or destroy the channel according to the flags in the msg */
	if (msg->flags & RPMSG_NS_DESTROY) {
		ret = __rpmsg_destroy_channel(vrp, &chinfo);
		if (ret)
			dev_err(dev, "__rpmsg_destroy_channel failed: %d\n",
				ret);
	} else {
		newch = __rpmsg_create_channel(vrp, &chinfo);
		if (!newch)
			dev_err(dev, "__rpmsg_create_channel failed\n");
	}
}

3.2 rpmsg socket

The system wraps the ability to create a channel on top of the virtqueues and communicate with the remote processor into a socket interface.

net\rpmsg\rpmsg_proto.c:

static int __init rpmsg_proto_init(void)
{
	int ret;

	ret = proto_register(&rpmsg_proto, 0);
	if (ret) {
		pr_err("proto_register failed: %d\n", ret);
		return ret;
	}

	ret = sock_register(&rpmsg_proto_family);
	if (ret) {
		pr_err("sock_register failed: %d\n", ret);
		goto proto_unreg;
	}

	ret = register_rpmsg_driver(&rpmsg_proto_driver);
	if (ret) {
		pr_err("register_rpmsg_driver failed: %d\n", ret);
		goto sock_unreg;
	}

	return 0;

sock_unreg:
	sock_unregister(PF_RPMSG);
proto_unreg:
	proto_unregister(&rpmsg_proto);
	return ret;
}

↓

static const struct net_proto_family rpmsg_proto_family = {
	.family = PF_RPMSG,
	.create	= rpmsg_sock_create,
	.owner = THIS_MODULE,
};

↓

static int rpmsg_sock_create(struct net *net, struct socket *sock, int proto,
			     int kern)
{
	struct sock *sk;
	struct rpmsg_socket *rpsk;

	if (sock->type != SOCK_SEQPACKET)
		return -ESOCKTNOSUPPORT;
	if (proto != 0)
		return -EPROTONOSUPPORT;

	sk = sk_alloc(net, PF_RPMSG, GFP_KERNEL, &rpmsg_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock->state = SS_UNCONNECTED;
	sock->ops = &rpmsg_sock_ops;
	sock_init_data(sock, sk);

	sk->sk_destruct = rpmsg_sock_destruct;
	sk->sk_protocol = proto;

	sk->sk_state = RPMSG_OPEN;

	rpsk = container_of(sk, struct rpmsg_socket, sk);
	INIT_LIST_HEAD(&rpsk->elem);
	/* use RPMSG_LOCALHOST to serve as an invalid value */
	rpsk->rproc_id = RPMSG_LOCALHOST;

	return 0;
}

↓

static const struct proto_ops rpmsg_sock_ops = {
	.family		= PF_RPMSG,
	.owner		= THIS_MODULE,

	.release	= rpmsg_sock_release,
	.connect	= rpmsg_sock_connect,
	.getname	= rpmsg_sock_getname,
	.sendmsg	= rpmsg_sock_sendmsg,
	.recvmsg	= rpmsg_sock_recvmsg,
	.poll		= rpmsg_sock_poll,
	.bind		= rpmsg_sock_bind,

	.listen		= sock_no_listen,
	.accept		= sock_no_accept,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt
};
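
From user space, talking to the DSP through this protocol family looks roughly like the hypothetical sketch below. It assumes the TI-provided rpmsg_socket.h header, which defines AF_RPMSG and struct sockaddr_rpmsg with the family/vproc_id/addr fields used by the kernel code above; the vproc_id value and the 0x3d endpoint address (the rpmsg-proto address announced in the boot log) are illustrative only.

/* Hypothetical user-space sketch; AF_RPMSG and struct sockaddr_rpmsg come
 * from the TI rpmsg_socket.h header, and the id/address values below are
 * only illustrative.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include "rpmsg_socket.h"	/* TI header: AF_RPMSG, struct sockaddr_rpmsg */

int main(void)
{
	struct sockaddr_rpmsg addr;
	char msg[] = "hello dsp";
	int fd;

	/* rpmsg_sock_create() only accepts SOCK_SEQPACKET with proto 0 */
	fd = socket(AF_RPMSG, SOCK_SEQPACKET, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.family = AF_RPMSG;
	addr.vproc_id = 0;	/* illustrative: id of the remote processor */
	addr.addr = 0x3d;	/* rpmsg-proto endpoint announced at boot */

	/* connect() selects the channel toward the remote endpoint */
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("connect");
		close(fd);
		return 1;
	}

	send(fd, msg, sizeof(msg), 0);

	close(fd);
	return 0;
}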

3.2.1 rpmsg_bus device

When a PF_RPMSG socket is bound with bind(), the corresponding channel is created:

static int
rpmsg_sock_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct rpmsg_socket *rpsk = container_of(sk, struct rpmsg_socket, sk);
	struct rpmsg_channel *rpdev;
	struct sockaddr_rpmsg *sa = (struct sockaddr_rpmsg *)uaddr;
	struct virtproc_info *vrp;

	if (sock->state == SS_CONNECTED)
		return -EINVAL;

	if (addr_len != sizeof(*sa))
		return -EINVAL;

	if (sa->family != AF_RPMSG)
		return -EINVAL;

	if (rpsk->rpdev)
		return -EBUSY;

	if (sk->sk_state != RPMSG_OPEN)
		return -EINVAL;

	vrp = radix_tree_lookup(&rpmsg_vprocs, sa->vproc_id);
	if (!vrp)
		return -EINVAL;

	rpdev = rpmsg_create_channel(vrp, "rpmsg-proto", "", sa->addr,
				     RPMSG_ADDR_ANY);
	if (!rpdev)
		return -EINVAL;

	rpsk->rpdev = rpdev;
	rpsk->unregister_rpdev = true;
	rpsk->rproc_id = sa->vproc_id;

	/* bind this socket with its rpmsg endpoint */
	rpdev->ept->priv = sk;

	sk->sk_state = RPMSG_LISTENING;

	return 0;
}

↓

rpmsg_create_channel()

↓

static
struct rpmsg_channel *__rpmsg_create_channel(struct virtproc_info *vrp,
					     struct rpmsg_channel_info *chinfo)
{
	struct rpmsg_channel *rpdev;
	struct device *tmp, *dev = &vrp->vdev->dev;
	int ret;

	/* make sure a similar channel doesn't already exist */
	tmp = device_find_child(dev, chinfo, rpmsg_channel_match);
	if (tmp) {
		/* decrement the matched device's refcount back */
		put_device(tmp);
		dev_err(dev, "channel %s:%s:%x:%x already exist\n",
			chinfo->name, chinfo->desc,
			chinfo->src, chinfo->dst);
		return NULL;
	}

	rpdev = kzalloc(sizeof(struct rpmsg_channel), GFP_KERNEL);
	if (!rpdev) {
		pr_err("kzalloc failed\n");
		return NULL;
	}

	rpdev->vrp = vrp;
	rpdev->src = chinfo->src;
	rpdev->dst = chinfo->dst;
	strncpy(rpdev->desc, chinfo->desc, RPMSG_NAME_SIZE);

	/*
	 * rpmsg server channels has predefined local address (for now),
	 * and their existence needs to be announced remotely
	 */
	rpdev->announce = rpdev->src != RPMSG_ADDR_ANY ? true : false;

	/* here the value is "rpmsg-proto" */
	strncpy(rpdev->id.name, chinfo->name, RPMSG_NAME_SIZE);

	/* very simple device indexing plumbing which is enough for now */
	dev_set_name(&rpdev->dev, "rpmsg%d", rpmsg_dev_index++);

	/* based on the channel info, create a device on the rpmsg_bus */
	rpdev->dev.parent = &vrp->vdev->dev;
	rpdev->dev.bus = &rpmsg_bus;
	rpdev->dev.release = rpmsg_release_device;

	ret = device_register(&rpdev->dev);
	if (ret) {
		dev_err(dev, "device_register failed: %d\n", ret);
		put_device(&rpdev->dev);
		return NULL;
	}

	return rpdev;
}

3.2.2 rpmsg_bus driver

The corresponding driver on the rpmsg_bus is registered at module init time; the bus matches it against the "rpmsg-proto" devices created above by comparing rpdev->id.name with the driver's id_table, and then calls rpmsg_proto_probe():

net\rpmsg\rpmsg_proto.c:

static int __init rpmsg_proto_init(void)
{
	...

	ret = register_rpmsg_driver(&rpmsg_proto_driver);
	if (ret) {
		pr_err("register_rpmsg_driver failed: %d\n", ret);
		goto sock_unreg;
	}

	...
}

↓

static struct rpmsg_driver rpmsg_proto_driver = {
	.drv.name	= KBUILD_MODNAME,
	.id_table	= rpmsg_proto_id_table,
	.probe		= rpmsg_proto_probe,
	.callback	= rpmsg_proto_cb,
	.remove		= rpmsg_proto_remove,
};

static struct rpmsg_device_id rpmsg_proto_id_table[] = {
	{ .name	= "rpmsg-proto" },
	{ },
};
