
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -267,7 +267,6 @@ config PCIE_TANGO_SMP8759
 
 config VMD
 	depends on PCI_MSI && X86_64 && SRCU
-	select X86_DEV_DMA_OPS
 	tristate "Intel Volume Management Device Driver"
 	---help---
 	  Adds support for the Intel Volume Management Device (VMD). VMD is a
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -98,9 +98,6 @@ struct vmd_dev {
 	struct irq_domain	*irq_domain;
 	struct pci_bus		*bus;
 	u8			busn_start;
-
-	struct dma_map_ops	dma_ops;
-	struct dma_domain	dma_domain;
 };
 
 static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
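
The two struct members dropped above are VMD's hooks into the x86-specific per-domain DMA-ops machinery (CONFIG_X86_DEV_DMA_OPS, whose "select" disappears from Kconfig in the hunk above). A rough sketch of that interface follows; the field and function names are inferred from the vmd.c call sites removed further down (domain_nr, dma_ops, add_dma_domain(), del_dma_domain()), while the exact layout in arch/x86 is an assumption, not a copy of asm/device.h.

/*
 * Approximate shape of the pre-removal arch/x86 per-domain DMA-ops hook.
 * Only the names used by the removed vmd.c code are known to be real;
 * the rest is illustrative.
 */
struct list_head { struct list_head *next, *prev; };	/* stand-in for <linux/list.h> */
struct dma_map_ops;					/* opaque in this sketch */

struct dma_domain {
	struct list_head node;		/* linkage in the global list of DMA domains */
	struct dma_map_ops *dma_ops;	/* ops applied to every device in this domain */
	int domain_nr;			/* PCI segment/domain number to match against */
};

/* Registered by vmd_setup_dma_ops(), unregistered by vmd_teardown_dma_ops(). */
void add_dma_domain(struct dma_domain *domain);
void del_dma_domain(struct dma_domain *domain);
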
@@ -295,151 +292,6 @@ static struct msi_domain_info vmd_msi_domain_info = {
 	.chip		= &vmd_msi_controller,
 };
 
-/*
- * VMD replaces the requester ID with its own.  DMA mappings for devices in a
- * VMD domain need to be mapped for the VMD, not the device requiring
- * the mapping.
- */
-static struct device *to_vmd_dev(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
-
-	return &vmd->dev->dev;
-}
-
-static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
-		       gfp_t flag, unsigned long attrs)
-{
-	return dma_alloc_attrs(to_vmd_dev(dev), size, addr, flag, attrs);
-}
-
-static void vmd_free(struct device *dev, size_t size, void *vaddr,
-		     dma_addr_t addr, unsigned long attrs)
-{
-	return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs);
-}
-
-static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
-		    void *cpu_addr, dma_addr_t addr, size_t size,
-		    unsigned long attrs)
-{
-	return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size,
-			attrs);
-}
-
-static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
-			   void *cpu_addr, dma_addr_t addr, size_t size,
-			   unsigned long attrs)
-{
-	return dma_get_sgtable_attrs(to_vmd_dev(dev), sgt, cpu_addr, addr, size,
-			attrs);
-}
-
-static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
-			       unsigned long offset, size_t size,
-			       enum dma_data_direction dir,
-			       unsigned long attrs)
-{
-	return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir,
-			attrs);
-}
-
-static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
-			   enum dma_data_direction dir, unsigned long attrs)
-{
-	dma_unmap_page_attrs(to_vmd_dev(dev), addr, size, dir, attrs);
-}
-
-static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		      enum dma_data_direction dir, unsigned long attrs)
-{
-	return dma_map_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
-}
-
-static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-			 enum dma_data_direction dir, unsigned long attrs)
-{
-	dma_unmap_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
-}
-
-static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
-				    size_t size, enum dma_data_direction dir)
-{
-	dma_sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
-}
-
-static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
-				       size_t size, enum dma_data_direction dir)
-{
-	dma_sync_single_for_device(to_vmd_dev(dev), addr, size, dir);
-}
-
-static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction dir)
-{
-	dma_sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
-}
-
-static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-				   int nents, enum dma_data_direction dir)
-{
-	dma_sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
-}
-
-static int vmd_dma_supported(struct device *dev, u64 mask)
-{
-	return dma_supported(to_vmd_dev(dev), mask);
-}
-
-static u64 vmd_get_required_mask(struct device *dev)
-{
-	return dma_get_required_mask(to_vmd_dev(dev));
-}
-
-static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
-{
-	struct dma_domain *domain = &vmd->dma_domain;
-
-	if (get_dma_ops(&vmd->dev->dev))
-		del_dma_domain(domain);
-}
-
-#define ASSIGN_VMD_DMA_OPS(source, dest, fn)	\
-	do {					\
-		if (source->fn)			\
-			dest->fn = vmd_##fn;	\
-	} while (0)
-
-static void vmd_setup_dma_ops(struct vmd_dev *vmd)
-{
-	const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev);
-	struct dma_map_ops *dest = &vmd->dma_ops;
-	struct dma_domain *domain = &vmd->dma_domain;
-
-	domain->domain_nr = vmd->sysdata.domain;
-	domain->dma_ops = dest;
-
-	if (!source)
-		return;
-	ASSIGN_VMD_DMA_OPS(source, dest, alloc);
-	ASSIGN_VMD_DMA_OPS(source, dest, free);
-	ASSIGN_VMD_DMA_OPS(source, dest, mmap);
-	ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable);
-	ASSIGN_VMD_DMA_OPS(source, dest, map_page);
-	ASSIGN_VMD_DMA_OPS(source, dest, unmap_page);
-	ASSIGN_VMD_DMA_OPS(source, dest, map_sg);
-	ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
-	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
-	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
-	add_dma_domain(domain);
-}
-#undef ASSIGN_VMD_DMA_OPS
-
 static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
 				  unsigned int devfn, int reg, int len)
 {
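
Every helper deleted in this hunk is a thin forwarder: it swaps the calling device for the VMD endpoint via to_vmd_dev() and then calls the corresponding dma_*_attrs()/dma_sync_*() routine, and ASSIGN_VMD_DMA_OPS installs a forwarder only for callbacks the parent ops table actually provides. A minimal standalone C sketch of that conditional-forwarding pattern follows; everything in it (sample_ops, real_map, proxy_map) is invented for illustration and is not kernel code.

#include <stdio.h>
#include <stddef.h>

/* Toy ops table standing in for struct dma_map_ops. */
struct sample_ops {
	int  (*map)(const char *requester, size_t size);
	void (*unmap)(const char *requester, size_t size);
};

static int real_map(const char *requester, size_t size)
{
	printf("mapping %zu bytes on behalf of %s\n", size, requester);
	return 0;
}

/* Forwarder: replace the requester before delegating, as to_vmd_dev() did. */
static int proxy_map(const char *requester, size_t size)
{
	(void)requester;
	return real_map("proxy-endpoint", size);
}

/* Same shape as the removed ASSIGN_VMD_DMA_OPS macro. */
#define ASSIGN_PROXY_OP(source, dest, fn)		\
	do {						\
		if ((source)->fn)			\
			(dest)->fn = proxy_##fn;	\
	} while (0)

int main(void)
{
	struct sample_ops parent = { .map = real_map };	/* .unmap left NULL */
	struct sample_ops proxy = { 0 };

	ASSIGN_PROXY_OP(&parent, &proxy, map);
	/* .unmap is never assigned because the parent table lacks it. */

	proxy.map("child-device", 4096);
	return 0;
}

Copying callbacks only when the parent implements them leaves the remaining entries NULL, so callers keep whatever default handling they have for missing callbacks; the removed vmd_setup_dma_ops() relied on the same property so its private ops table mirrored the parent's coverage.
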
@@ -713,7 +565,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 	}
 
 	vmd_attach_resources(vmd);
-	vmd_setup_dma_ops(vmd);
 	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
 
 	pci_scan_child_bus(vmd->bus);
@@ -829,8 +680,6 @@ static void vmd_remove(struct pci_dev *dev)
 	pci_stop_root_bus(vmd->bus);
 	pci_remove_root_bus(vmd->bus);
 	vmd_cleanup_srcu(vmd);
-	vmd_teardown_dma_ops(vmd);
 	vmd_detach_resources(vmd);
 	irq_domain_remove(vmd->irq_domain);
 	irq_domain_free_fwnode(fn);
-
