# HG changeset patch
# User Keir Fraser
# Date 1224512379 -3600
# Node ID 2a25fd94c6f207d5b9066a1d765697a5a680fc42
# Parent bf84c03c38eebc527786e96af4178f114a5bea41
VT-d: correct allocation failure checks

Checking the return value of map_domain_page() (and hence
map_vtd_domain_page()) against NULL is pointless, checking the return
value of alloc_domheap_page() (and thus alloc_pgtable_maddr()) is
mandatory, however.

Signed-off-by: Jan Beulich

--- a/xen/drivers/passthrough/vtd/intremap.c
+++ b/xen/drivers/passthrough/vtd/intremap.c
@@ -458,7 +458,7 @@ int intremap_setup(struct iommu *iommu)
         {
             dprintk(XENLOG_WARNING VTDPREFIX,
                     "Cannot allocate memory for ir_ctrl->iremap_maddr\n");
-            return -ENODEV;
+            return -ENOMEM;
         }
         ir_ctrl->iremap_index = -1;
     }
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -220,10 +220,10 @@ static u64 addr_to_dma_page_maddr(struct
             if ( !alloc )
                 break;
             maddr = alloc_pgtable_maddr();
+            if ( !maddr )
+                break;
             dma_set_pte_addr(*pte, maddr);
             vaddr = map_vtd_domain_page(maddr);
-            if ( !vaddr )
-                break;
 
             /*
              * high level table always sets r/w, last level
@@ -236,8 +236,6 @@ static u64 addr_to_dma_page_maddr(struct
         else
         {
             vaddr = map_vtd_domain_page(pte->val);
-            if ( !vaddr )
-                break;
         }
 
         if ( level == 2 )
--- a/xen/drivers/passthrough/vtd/qinval.c
+++ b/xen/drivers/passthrough/vtd/qinval.c
@@ -429,7 +429,11 @@ int qinval_setup(struct iommu *iommu)
     {
         qi_ctrl->qinval_maddr = alloc_pgtable_maddr();
         if ( qi_ctrl->qinval_maddr == 0 )
-            panic("Cannot allocate memory for qi_ctrl->qinval_maddr\n");
+        {
+            dprintk(XENLOG_WARNING VTDPREFIX,
+                    "Cannot allocate memory for qi_ctrl->qinval_maddr\n");
+            return -ENOMEM;
+        }
         flush->context = flush_context_qi;
         flush->iotlb = flush_iotlb_qi;
     }
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -41,17 +41,19 @@ u64 alloc_pgtable_maddr(void)
 {
     struct page_info *pg;
     u64 *vaddr;
+    unsigned long mfn;
 
     pg = alloc_domheap_page(NULL, 0);
-    vaddr = map_domain_page(page_to_mfn(pg));
-    if ( !vaddr )
+    if ( !pg )
         return 0;
+    mfn = page_to_mfn(pg);
+    vaddr = map_domain_page(mfn);
     memset(vaddr, 0, PAGE_SIZE);
     iommu_flush_cache_page(vaddr);
     unmap_domain_page(vaddr);
 
-    return page_to_maddr(pg);
+    return (u64)mfn << PAGE_SHIFT_4K;
 }
 
 void free_pgtable_maddr(u64 maddr)
 {