author     Joerg Roedel  2009-05-18 15:32:48 +0200
committer  Joerg Roedel  2009-05-28 18:14:26 +0200
commit     803b8cb4d9a93b90c67aba2aab7f2c54d595b5b9 (patch)
tree       385fa4952c9014d04501b69a2b4410072f4dd361
parent     384de72910a7bf96a02a6d8023fe9e16d872beb2 (diff)
amd-iommu: change dma_dom->next_bit to dma_dom->next_address
Simplify the code a little bit by using the same unit for all
address-space-related state in the dma_ops domain structure.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
-rw-r--r--  arch/x86/include/asm/amd_iommu_types.h |  2
-rw-r--r--  arch/x86/kernel/amd_iommu.c             | 17
2 files changed, 10 insertions(+), 9 deletions(-)
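The core of the change is a unit switch: the allocator's resume point is now
kept as a byte offset (next_address) rather than a page index (next_bit), so
the call sites below stop shifting back and forth by PAGE_SHIFT. A minimal
standalone sketch of the two conventions; the names here (EX_PAGE_SHIFT,
struct ex_dom, ex_remember) are invented for illustration and are not part of
the driver:

#include <stdio.h>

#define EX_PAGE_SHIFT 12UL			/* assume 4 KiB pages */

struct ex_dom {
	unsigned long next_bit;			/* old bookkeeping: page index */
	unsigned long next_address;		/* new bookkeeping: byte offset */
};

/* record where the next search should resume after allocating 'pages'
 * pages at byte address 'address' */
static void ex_remember(struct ex_dom *dom,
			unsigned long address, unsigned long pages)
{
	/* old unit: convert the byte address back to a page index */
	dom->next_bit = (address >> EX_PAGE_SHIFT) + pages;
	/* new unit: stay in bytes, as the patch does */
	dom->next_address = address + (pages << EX_PAGE_SHIFT);
}

int main(void)
{
	struct ex_dom dom = { 0, 0 };

	ex_remember(&dom, 0x200000UL, 4);	/* 4 pages allocated at 2 MiB */
	printf("next_bit=%lu next_address=0x%lx\n",
	       dom.next_bit, dom.next_address);
	return 0;
}

With one unit everywhere, the only conversion left is inside
dma_ops_area_alloc(), where the allocation bitmap still works in pages, hence
the added next_bit >>= PAGE_SHIFT below.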
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index eca912931a85..4ff4cf1f0809 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -249,7 +249,7 @@ struct dma_ops_domain {
 	unsigned long aperture_size;
 
 	/* address we start to search for free addresses */
-	unsigned long next_bit;
+	unsigned long next_address;
 
 	/* address space relevant data */
 	struct aperture_range *aperture[APERTURE_MAX_RANGES];
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 794163ae97b4..c1a08b9119c9 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -627,13 +627,15 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
 					u64 dma_mask,
 					unsigned long start)
 {
-	unsigned long next_bit = dom->next_bit % APERTURE_RANGE_PAGES;
+	unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
 	int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
 	int i = start >> APERTURE_RANGE_SHIFT;
 	unsigned long boundary_size;
 	unsigned long address = -1;
 	unsigned long limit;
 
+	next_bit >>= PAGE_SHIFT;
+
 	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 			PAGE_SIZE) >> PAGE_SHIFT;
 
@@ -652,7 +654,7 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
 		if (address != -1) {
 			address = dom->aperture[i]->offset +
 				  (address << PAGE_SHIFT);
-			dom->next_bit = (address >> PAGE_SHIFT) + pages;
+			dom->next_address = address + (pages << PAGE_SHIFT);
 			break;
 		}
@@ -669,14 +671,12 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
 					     u64 dma_mask)
 {
 	unsigned long address;
-	unsigned long start = dom->next_bit << PAGE_SHIFT;
-
 	address = dma_ops_area_alloc(dev, dom, pages, align_mask,
-				     dma_mask, start);
+				     dma_mask, dom->next_address);
 	if (address == -1) {
-		dom->next_bit = 0;
+		dom->next_address = 0;
 		address = dma_ops_area_alloc(dev, dom, pages, align_mask,
 					     dma_mask, 0);
 		dom->need_flush = true;
@@ -704,10 +704,11 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 	BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
 
-	if ((address >> PAGE_SHIFT) >= dom->next_bit)
+	if (address >= dom->next_address)
 		dom->need_flush = true;
 
 	address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
+
 	iommu_area_free(range->bitmap, address, pages);
 }
@@ -870,7 +871,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
 	 * a valid dma-address. So we can use 0 as error value
 	 */
 	dma_dom->aperture[0]->bitmap[0] = 1;
-	dma_dom->next_bit = 0;
+	dma_dom->next_address = 0;
 
 	dma_dom->need_flush = false;
 	dma_dom->target_dev = 0xffff;
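Taken together, the hunks above preserve the allocator's next-fit behaviour:
search from dom->next_address, and if nothing is free past that point, wrap to
0, retry, and set need_flush so the IOTLB is flushed before addresses are
reused. A rough, self-contained sketch of that pattern, with invented names
(ex_dom, ex_area_alloc, ex_alloc) and a toy bump allocator standing in for
dma_ops_area_alloc():

#include <stdio.h>

#define EX_PAGE_SHIFT    12UL
#define EX_APERTURE_SIZE (1UL << 25)		/* pretend 32 MiB aperture */

struct ex_dom {
	unsigned long next_address;		/* byte offset to resume searching at */
	int need_flush;				/* addresses reused -> IOTLB flush needed */
};

/* toy bump allocator standing in for dma_ops_area_alloc(); returns
 * (unsigned long)-1 on failure, like the real function */
static unsigned long ex_area_alloc(struct ex_dom *dom,
				   unsigned long pages, unsigned long start)
{
	unsigned long size = pages << EX_PAGE_SHIFT;

	if (start + size > EX_APERTURE_SIZE)
		return (unsigned long)-1;	/* nothing free past 'start' */

	dom->next_address = start + size;	/* same bookkeeping as the patch */
	return start;
}

static unsigned long ex_alloc(struct ex_dom *dom, unsigned long pages)
{
	unsigned long address;

	/* first try: continue where the previous allocation stopped */
	address = ex_area_alloc(dom, pages, dom->next_address);

	if (address == (unsigned long)-1) {
		/* wrap around to the start of the aperture and retry */
		dom->next_address = 0;
		address = ex_area_alloc(dom, pages, 0);
		dom->need_flush = 1;		/* old mappings may be reused */
	}

	return address;
}

int main(void)
{
	struct ex_dom dom = { 0, 0 };
	unsigned long a;
	int i;

	/* eight 4 MiB allocations fill the toy aperture ... */
	for (i = 0; i < 8; i++)
		ex_alloc(&dom, 1024);		/* 1024 pages = 4 MiB */

	/* ... so the ninth wraps back to offset 0 and requests a flush */
	a = ex_alloc(&dom, 1024);
	printf("ninth alloc at 0x%lx, need_flush=%d\n", a, dom.need_flush);
	return 0;
}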