author      Linus Torvalds    2018-08-26 11:48:42 -0700
committer   Linus Torvalds    2018-08-26 11:48:42 -0700
commit      aba16dc5cf9318b4e0fe92f8261779cd9f1d2d77
tree        1f53d2cee40e82efe6a727208307af475327af9a
parent      c4726e774ed27680c418e138234dfd2b8e1e89ac
parent      1df895190233fcc30d46beca4550bcafb7b959a6
Merge branch 'ida-4.19' of git://git.infradead.org/users/willy/linux-dax
Pull IDA updates from Matthew Wilcox:
"A better IDA API:
id = ida_alloc(ida, GFP_xxx);
ida_free(ida, id);
rather than the cumbersome ida_simple_get(), ida_simple_remove().
The new IDA API is similar to ida_simple_get() but better named. The
internal restructuring of the IDA code removes the bitmap
preallocation nonsense.
I hope the net -200 lines of code is convincing"
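For context, here is a minimal sketch of the before/after idiom this series applies across the tree. The names example_ida, example_lock and the helper functions are illustrative only and do not appear in any of the converted drivers; the two idioms would never coexist in one file and are shown together purely for contrast.

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_ida);
static DEFINE_SPINLOCK(example_lock);

/* Old API: preallocate, take the caller's own lock, retry on -EAGAIN. */
static int example_old_alloc(void)
{
        int id, err;

again:
        if (!ida_pre_get(&example_ida, GFP_KERNEL))
                return -ENOMEM;
        spin_lock(&example_lock);
        err = ida_get_new(&example_ida, &id);
        spin_unlock(&example_lock);
        if (err == -EAGAIN)
                goto again;
        return err ? err : id;
}

/* New API: one call; locking is internal and the ID (or -errno) is returned. */
static int example_new_alloc(void)
{
        return ida_alloc(&example_ida, GFP_KERNEL);
}

static void example_free(int id)
{
        ida_free(&example_ida, id);     /* replaces ida_remove() under the caller's lock */
}

Because the new calls do their own locking, the per-driver spinlocks that guarded ida_get_new()/ida_remove() can simply be deleted, which is where much of the line-count reduction in the diffstat below comes from.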
* 'ida-4.19' of git://git.infradead.org/users/willy/linux-dax: (29 commits)
ida: Change ida_get_new_above to return the id
ida: Remove old API
test_ida: check_ida_destroy and check_ida_alloc
test_ida: Convert check_ida_conv to new API
test_ida: Move ida_check_max
test_ida: Move ida_check_leaf
idr-test: Convert ida_check_nomem to new API
ida: Start new test_ida module
target/iscsi: Allocate session IDs from an IDA
iscsi target: fix session creation failure handling
drm/vmwgfx: Convert to new IDA API
dmaengine: Convert to new IDA API
ppc: Convert vas ID allocation to new IDA API
media: Convert entity ID allocation to new IDA API
ppc: Convert mmu context allocation to new IDA API
Convert net_namespace to new IDA API
cb710: Convert to new IDA API
rsxx: Convert to new IDA API
osd: Convert to new IDA API
sd: Convert to new IDA API
...
28 files changed, 485 insertions, 679 deletions
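Several of the conversions in the diff below use the bounded allocators rather than plain ida_alloc(). The following is a minimal sketch of those variants; winid_ida, MAX_WINID and the helpers are assumptions for illustration, not names taken from the patches.

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(winid_ida);
#define MAX_WINID 1023          /* illustrative upper bound */

static int winid_alloc(void)
{
        /* Allocate an ID in [0, MAX_WINID]; both bounds are inclusive. */
        int id = ida_alloc_max(&winid_ida, MAX_WINID, GFP_KERNEL);

        /*
         * -ENOSPC means every ID in the range is already in use;
         * -ENOMEM means the allocator could not get memory.
         */
        if (id == -ENOSPC)
                return -EBUSY;  /* map "range full" to a caller-specific error */
        return id;
}

static int winid_alloc_above(unsigned int min)
{
        /* ida_alloc_min()/ida_alloc_range() bound the low end as well. */
        return ida_alloc_range(&winid_ida, min, MAX_WINID, GFP_KERNEL);
}

In simplified form this mirrors what the vas-window and osd conversions below do when they translate -ENOSPC from an exhausted range into their existing -EAGAIN or -EBUSY error codes.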
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c index 4a892d894a0f..dbd8f762140b 100644 --- a/arch/powerpc/mm/mmu_context_book3s64.c +++ b/arch/powerpc/mm/mmu_context_book3s64.c @@ -26,48 +26,16 @@ #include <asm/mmu_context.h> #include <asm/pgalloc.h> -static DEFINE_SPINLOCK(mmu_context_lock); static DEFINE_IDA(mmu_context_ida); static int alloc_context_id(int min_id, int max_id) { - int index, err; - -again: - if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL)) - return -ENOMEM; - - spin_lock(&mmu_context_lock); - err = ida_get_new_above(&mmu_context_ida, min_id, &index); - spin_unlock(&mmu_context_lock); - - if (err == -EAGAIN) - goto again; - else if (err) - return err; - - if (index > max_id) { - spin_lock(&mmu_context_lock); - ida_remove(&mmu_context_ida, index); - spin_unlock(&mmu_context_lock); - return -ENOMEM; - } - - return index; + return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL); } void hash__reserve_context_id(int id) { - int rc, result = 0; - - do { - if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL)) - break; - - spin_lock(&mmu_context_lock); - rc = ida_get_new_above(&mmu_context_ida, id, &result); - spin_unlock(&mmu_context_lock); - } while (rc == -EAGAIN); + int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL); WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result); } @@ -172,9 +140,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) void __destroy_context(int context_id) { - spin_lock(&mmu_context_lock); - ida_remove(&mmu_context_ida, context_id); - spin_unlock(&mmu_context_lock); + ida_free(&mmu_context_ida, context_id); } EXPORT_SYMBOL_GPL(__destroy_context); @@ -182,13 +148,11 @@ static void destroy_contexts(mm_context_t *ctx) { int index, context_id; - spin_lock(&mmu_context_lock); for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) { context_id = ctx->extended_id[index]; if (context_id) - ida_remove(&mmu_context_ida, context_id); + ida_free(&mmu_context_ida, context_id); } - spin_unlock(&mmu_context_lock); } static void pte_frag_destroy(void *pte_frag) diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c index ff9f48812331..e59e0e60e5b5 100644 --- a/arch/powerpc/platforms/powernv/vas-window.c +++ b/arch/powerpc/platforms/powernv/vas-window.c @@ -515,35 +515,17 @@ int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx) return 0; } -static DEFINE_SPINLOCK(vas_ida_lock); - static void vas_release_window_id(struct ida *ida, int winid) { - spin_lock(&vas_ida_lock); - ida_remove(ida, winid); - spin_unlock(&vas_ida_lock); + ida_free(ida, winid); } static int vas_assign_window_id(struct ida *ida) { - int rc, winid; - - do { - rc = ida_pre_get(ida, GFP_KERNEL); - if (!rc) - return -EAGAIN; - - spin_lock(&vas_ida_lock); - rc = ida_get_new(ida, &winid); - spin_unlock(&vas_ida_lock); - } while (rc == -EAGAIN); - - if (rc) - return rc; + int winid = ida_alloc_max(ida, VAS_WINDOWS_PER_CHIP - 1, GFP_KERNEL); - if (winid > VAS_WINDOWS_PER_CHIP) { - pr_err("Too many (%d) open windows\n", winid); - vas_release_window_id(ida, winid); + if (winid == -ENOSPC) { + pr_err("Too many (%d) open windows\n", VAS_WINDOWS_PER_CHIP); return -EAGAIN; } diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index db253cd5b32a..d0666f5ce003 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -118,7 +118,6 @@ static struct dentry 
*dfs_device_status; static u32 cpu_use[NR_CPUS]; -static DEFINE_SPINLOCK(rssd_index_lock); static DEFINE_IDA(rssd_index_ida); static int mtip_block_initialize(struct driver_data *dd); @@ -3767,20 +3766,10 @@ static int mtip_block_initialize(struct driver_data *dd) goto alloc_disk_error; } - /* Generate the disk name, implemented same as in sd.c */ - do { - if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL)) { - rv = -ENOMEM; - goto ida_get_error; - } - - spin_lock(&rssd_index_lock); - rv = ida_get_new(&rssd_index_ida, &index); - spin_unlock(&rssd_index_lock); - } while (rv == -EAGAIN); - - if (rv) + rv = ida_alloc(&rssd_index_ida, GFP_KERNEL); + if (rv < 0) goto ida_get_error; + index = rv; rv = rssd_disk_name_format("rssd", index, @@ -3922,9 +3911,7 @@ block_queue_alloc_init_error: block_queue_alloc_tag_error: mtip_hw_debugfs_exit(dd); disk_index_error: - spin_lock(&rssd_index_lock); - ida_remove(&rssd_index_ida, index); - spin_unlock(&rssd_index_lock); + ida_free(&rssd_index_ida, index); ida_get_error: put_disk(dd->disk); @@ -4012,9 +3999,7 @@ static int mtip_block_remove(struct driver_data *dd) } dd->disk = NULL; - spin_lock(&rssd_index_lock); - ida_remove(&rssd_index_ida, dd->index); - spin_unlock(&rssd_index_lock); + ida_free(&rssd_index_ida, dd->index); /* De-initialize the protocol layer. */ mtip_hw_exit(dd); @@ -4054,9 +4039,7 @@ static int mtip_block_shutdown(struct driver_data *dd) dd->queue = NULL; } - spin_lock(&rssd_index_lock); - ida_remove(&rssd_index_ida, dd->index); - spin_unlock(&rssd_index_lock); + ida_free(&rssd_index_ida, dd->index); return 0; } diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index b7d71914a32a..f2c631ce793c 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c @@ -58,7 +58,6 @@ MODULE_PARM_DESC(sync_start, "On by Default: Driver load will not complete " "until the card startup has completed."); static DEFINE_IDA(rsxx_disk_ida); -static DEFINE_SPINLOCK(rsxx_ida_lock); /* --------------------Debugfs Setup ------------------- */ @@ -771,19 +770,10 @@ static int rsxx_pci_probe(struct pci_dev *dev, card->dev = dev; pci_set_drvdata(dev, card); - do { - if (!ida_pre_get(&rsxx_disk_ida, GFP_KERNEL)) { - st = -ENOMEM; - goto failed_ida_get; - } - - spin_lock(&rsxx_ida_lock); - st = ida_get_new(&rsxx_disk_ida, &card->disk_id); - spin_unlock(&rsxx_ida_lock); - } while (st == -EAGAIN); - - if (st) + st = ida_alloc(&rsxx_disk_ida, GFP_KERNEL); + if (st < 0) goto failed_ida_get; + card->disk_id = st; st = pci_enable_device(dev); if (st) @@ -985,9 +975,7 @@ failed_request_regions: failed_dma_mask: pci_disable_device(dev); failed_enable: - spin_lock(&rsxx_ida_lock); - ida_remove(&rsxx_disk_ida, card->disk_id); - spin_unlock(&rsxx_ida_lock); + ida_free(&rsxx_disk_ida, card->disk_id); failed_ida_get: kfree(card); @@ -1050,6 +1038,7 @@ static void rsxx_pci_remove(struct pci_dev *dev) pci_disable_device(dev); pci_release_regions(dev); + ida_free(&rsxx_disk_ida, card->disk_id); kfree(card); } diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 272bed6c8ba7..f1a441ab395d 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -161,9 +161,7 @@ static void chan_dev_release(struct device *dev) chan_dev = container_of(dev, typeof(*chan_dev), device); if (atomic_dec_and_test(chan_dev->idr_ref)) { - mutex_lock(&dma_list_mutex); - ida_remove(&dma_ida, chan_dev->dev_id); - mutex_unlock(&dma_list_mutex); + ida_free(&dma_ida, chan_dev->dev_id); kfree(chan_dev->idr_ref); } kfree(chan_dev); @@ -898,17 +896,12 @@ static bool 
device_has_all_tx_types(struct dma_device *device) static int get_dma_id(struct dma_device *device) { - int rc; - - do { - if (!ida_pre_get(&dma_ida, GFP_KERNEL)) - return -ENOMEM; - mutex_lock(&dma_list_mutex); - rc = ida_get_new(&dma_ida, &device->dev_id); - mutex_unlock(&dma_list_mutex); - } while (rc == -EAGAIN); + int rc = ida_alloc(&dma_ida, GFP_KERNEL); - return rc; + if (rc < 0) + return rc; + device->dev_id = rc; + return 0; } /** @@ -1092,9 +1085,7 @@ int dma_async_device_register(struct dma_device *device) err_out: /* if we never registered a channel just release the idr */ if (atomic_read(idr_ref) == 0) { - mutex_lock(&dma_list_mutex); - ida_remove(&dma_ida, device->dev_id); - mutex_unlock(&dma_list_mutex); + ida_free(&dma_ida, device->dev_id); kfree(idr_ref); return rc; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index ddb1e9365a3e..b93c558dd86e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -51,51 +51,34 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, { struct vmwgfx_gmrid_man *gman = (struct vmwgfx_gmrid_man *)man->priv; - int ret = 0; int id; mem->mm_node = NULL; + id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL); + if (id < 0) + return id; + spin_lock(&gman->lock); if (gman->max_gmr_pages > 0) { gman->used_gmr_pages += bo->num_pages; if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages)) - goto out_err_locked; + goto nospace; } - do { - spin_unlock(&gman->lock); - if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) { - ret = -ENOMEM; - goto out_err; - } - spin_lock(&gman->lock); - - ret = ida_get_new(&gman->gmr_ida, &id); - if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) { - ida_remove(&gman->gmr_ida, id); - ret = 0; - goto out_err_locked; - } - } while (ret == -EAGAIN); - - if (likely(ret == 0)) { - mem->mm_node = gman; - mem->start = id; - mem->num_pages = bo->num_pages; - } else - goto out_err_locked; + mem->mm_node = gman; + mem->start = id; + mem->num_pages = bo->num_pages; spin_unlock(&gman->lock); return 0; -out_err: - spin_lock(&gman->lock); -out_err_locked: +nospace: gman->used_gmr_pages -= bo->num_pages; spin_unlock(&gman->lock); - return ret; + ida_free(&gman->gmr_ida, id); + return 0; } static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, @@ -105,8 +88,8 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, (struct vmwgfx_gmrid_man *)man->priv; if (mem->mm_node) { + ida_free(&gman->gmr_ida, mem->start); spin_lock(&gman->lock); - ida_remove(&gman->gmr_ida, mem->start); gman->used_gmr_pages -= mem->num_pages; spin_unlock(&gman->lock); mem->mm_node = NULL; diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c index fcdf3d5dc4b6..3bae24b15eaa 100644 --- a/drivers/media/media-device.c +++ b/drivers/media/media-device.c @@ -585,18 +585,12 @@ int __must_check media_device_register_entity(struct media_device *mdev, entity->num_links = 0; entity->num_backlinks = 0; - if (!ida_pre_get(&mdev->entity_internal_idx, GFP_KERNEL)) - return -ENOMEM; - - mutex_lock(&mdev->graph_mutex); - - ret = ida_get_new_above(&mdev->entity_internal_idx, 1, - &entity->internal_idx); - if (ret < 0) { - mutex_unlock(&mdev->graph_mutex); + ret = ida_alloc_min(&mdev->entity_internal_idx, 1, GFP_KERNEL); + if (ret < 0) return ret; - } + entity->internal_idx = ret; + mutex_lock(&mdev->graph_mutex); mdev->entity_internal_idx_max = 
max(mdev->entity_internal_idx_max, entity->internal_idx); @@ -642,7 +636,7 @@ static void __media_device_unregister_entity(struct media_entity *entity) struct media_interface *intf; unsigned int i; - ida_simple_remove(&mdev->entity_internal_idx, entity->internal_idx); + ida_free(&mdev->entity_internal_idx, entity->internal_idx); /* Remove all interface links pointing to this entity */ list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) { diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c index 4d4acf763b65..2c43fd09d602 100644 --- a/drivers/misc/cb710/core.c +++ b/drivers/misc/cb710/core.c @@ -16,7 +16,6 @@ #include <linux/gfp.h> static DEFINE_IDA(cb710_ida); -static DEFINE_SPINLOCK(cb710_ida_lock); void cb710_pci_update_config_reg(struct pci_dev *pdev, int reg, uint32_t mask, uint32_t xor) @@ -205,7 +204,6 @@ static int cb710_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct cb710_chip *chip; - unsigned long flags; u32 val; int err; int n = 0; @@ -256,18 +254,10 @@ static int cb710_probe(struct pci_dev *pdev, if (err) return err; - do { - if (!ida_pre_get(&cb710_ida, GFP_KERNEL)) - return -ENOMEM; - - spin_lock_irqsave(&cb710_ida_lock, flags); - err = ida_get_new(&cb710_ida, &chip->platform_id); - spin_unlock_irqrestore(&cb710_ida_lock, flags); - - if (err && err != -EAGAIN) - return err; - } while (err); - + err = ida_alloc(&cb710_ida, GFP_KERNEL); + if (err < 0) + return err; + chip->platform_id = err; dev_info(&pdev->dev, "id %d, IO 0x%p, IRQ %d\n", chip->platform_id, chip->iobase, pdev->irq); @@ -308,7 +298,6 @@ unreg_mmc: static void cb710_remove_one(struct pci_dev *pdev) { struct cb710_chip *chip = pci_get_drvdata(pdev); - unsigned long flags; cb710_unregister_slot(chip, CB710_SLOT_SM); cb710_unregister_slot(chip, CB710_SLOT_MS); @@ -317,9 +306,7 @@ static void cb710_remove_one(struct pci_dev *pdev) BUG_ON(atomic_read(&chip->slot_refs_count) != 0); #endif - spin_lock_irqsave(&cb710_ida_lock, flags); - ida_remove(&cb710_ida, chip->platform_id); - spin_unlock_irqrestore(&cb710_ida_lock, flags); + ida_free(&cb710_ida, chip->platform_id); } static const struct pci_device_id cb710_pci_tbl[] = { diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c index 0e56f1eb05dc..eaf36ccf58db 100644 --- a/drivers/scsi/osd/osd_uld.c +++ b/drivers/scsi/osd/osd_uld.c @@ -423,19 +423,11 @@ static int osd_probe(struct device *dev) if (scsi_device->type != TYPE_OSD) return -ENODEV; - do { - if (!ida_pre_get(&osd_minor_ida, GFP_KERNEL)) - return -ENODEV; - - error = ida_get_new(&osd_minor_ida, &minor); - } while (error == -EAGAIN); - - if (error) - return error; - if (minor >= SCSI_OSD_MAX_MINOR) { - error = -EBUSY; - goto err_retract_minor; - } + minor = ida_alloc_max(&osd_minor_ida, SCSI_OSD_MAX_MINOR, GFP_KERNEL); + if (minor == -ENOSPC) + return -EBUSY; + if (minor < 0) + return -ENODEV; error = -ENOMEM; oud = kzalloc(sizeof(*oud), GFP_KERNEL); @@ -499,7 +491,7 @@ static int osd_probe(struct device *dev) err_free_osd: put_device(&oud->class_dev); err_retract_minor: - ida_remove(&osd_minor_ida, minor); + ida_free(&osd_minor_ida, minor); return error; } @@ -514,7 +506,7 @@ static int osd_remove(struct device *dev) } cdev_device_del(&oud->cdev, &oud->class_dev); - ida_remove(&osd_minor_ida, oud->minor); + ida_free(&osd_minor_ida, oud->minor); put_device(&oud->class_dev); return 0; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index a58cee7a85f2..b79b366a94f7 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -123,7 +123,6 @@ static 
void scsi_disk_release(struct device *cdev); static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *); static void sd_print_result(const struct scsi_disk *, const char *, int); -static DEFINE_SPINLOCK(sd_index_lock); static DEFINE_IDA(sd_index_ida); /* This semaphore is used to mediate the 0->1 reference get in the @@ -3340,16 +3339,8 @@ static int sd_probe(struct device *dev) if (!gd) goto out_free; - do { - if (!ida_pre_get(&sd_index_ida, GFP_KERNEL)) - goto out_put; - - spin_lock(&sd_index_lock); - error = ida_get_new(&sd_index_ida, &index); - spin_unlock(&sd_index_lock); - } while (error == -EAGAIN); - - if (error) { + index = ida_alloc(&sd_index_ida, GFP_KERNEL); + if (index < 0) { sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n"); goto out_put; } @@ -3393,9 +3384,7 @@ static int sd_probe(struct device *dev) return 0; out_free_index: - spin_lock(&sd_index_lock); - ida_remove(&sd_index_ida, index); - spin_unlock(&sd_index_lock); + ida_free(&sd_index_ida, index); out_put: put_disk(gd); out_free: @@ -3460,9 +3449,7 @@ static void scsi_disk_release(struct device *dev) struct scsi_disk *sdkp = to_scsi_disk(dev); struct gendisk *disk = sdkp->disk; - spin_lock(&sd_index_lock); - ida_remove(&sd_index_ida, sdkp->index); - spin_unlock(&sd_index_lock); + ida_free(&sd_index_ida, sdkp->index); disk->private_data = NULL; put_disk(disk); diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 8e223799347a..94bad43c41ff 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -57,9 +57,8 @@ static DEFINE_SPINLOCK(tiqn_lock); static DEFINE_MUTEX(np_lock); static struct idr tiqn_idr; -struct idr sess_idr; +DEFINE_IDA(sess_ida); struct mutex auth_id_lock; -spinlock_t sess_idr_lock; struct iscsit_global *iscsit_global; @@ -700,9 +699,7 @@ static int __init iscsi_target_init_module(void) spin_lock_init(&iscsit_global->ts_bitmap_lock); mutex_init(&auth_id_lock); - spin_lock_init(&sess_idr_lock); idr_init(&tiqn_idr); - idr_init(&sess_idr); ret = target_register_template(&iscsi_ops); if (ret) @@ -4375,10 +4372,7 @@ int iscsit_close_session(struct iscsi_session *sess) pr_debug("Decremented number of active iSCSI Sessions on" " iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions); - spin_lock(&sess_idr_lock); - idr_remove(&sess_idr, sess->session_index); - spin_unlock(&sess_idr_lock); - + ida_free(&sess_ida, sess->session_index); kfree(sess->sess_ops); sess->sess_ops = NULL; spin_unlock_bh(&se_tpg->session_lock); diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h index 42de1843aa40..48bac0acf8c7 100644 --- a/drivers/target/iscsi/iscsi_target.h +++ b/drivers/target/iscsi/iscsi_target.h @@ -55,9 +55,7 @@ extern struct kmem_cache *lio_ooo_cache; extern struct kmem_cache *lio_qr_cache; extern struct kmem_cache *lio_r2t_cache; -extern struct idr sess_idr; +extern struct ida sess_ida; extern struct mutex auth_id_lock; -extern spinlock_t sess_idr_lock; - #endif /*** ISCSI_TARGET_H ***/ diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 923b1a9fc3dc..9e74f8bc2963 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -336,22 +336,15 @@ static int iscsi_login_zero_tsih_s1( timer_setup(&sess->time2retain_timer, iscsit_handle_time2retain_timeout, 0); - idr_preload(GFP_KERNEL); - spin_lock_bh(&sess_idr_lock); - ret = idr_alloc(&sess_idr, NULL, 0, 0, GFP_NOWAIT); - if (ret >= 0) - 
sess->session_index = ret; - spin_unlock_bh(&sess_idr_lock); - idr_preload_end(); - + ret = ida_alloc(&sess_ida, GFP_KERNEL); if (ret < 0) { - pr_err("idr_alloc() for sess_idr failed\n"); + pr_err("Session ID allocation failed %d\n", ret); iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); - kfree(sess); - return -ENOMEM; + goto free_sess; } + sess->session_index = ret; sess->creation_time = get_jiffies_64(); /* * The FFP CmdSN window values will be allocated from the TPG's @@ -365,20 +358,26 @@ static int iscsi_login_zero_tsih_s1( ISCSI_LOGIN_STATUS_NO_RESOURCES); pr_err("Unable to allocate memory for" " struct iscsi_sess_ops.\n"); - kfree(sess); - return -ENOMEM; + goto free_id; } sess->se_sess = transport_alloc_session(TARGET_PROT_NORMAL); if (IS_ERR(sess->se_sess)) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); - kfree(sess->sess_ops); - kfree(sess); - return -ENOMEM; + goto free_ops; } return 0; + +free_ops: + kfree(sess->sess_ops); +free_id: + ida_free(&sess_ida, sess->session_index); +free_sess: + kfree(sess); + conn->sess = NULL; + return -ENOMEM; } static int iscsi_login_zero_tsih_s2( @@ -1161,13 +1160,9 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn, ISCSI_LOGIN_STATUS_INIT_ERR); if (!zero_tsih || !conn->sess) goto old_sess_out; - if (conn->sess->se_sess) - transport_free_session(conn->sess->se_sess); - if (conn->sess->session_index != 0) { - spin_lock_bh(&sess_idr_lock); - idr_remove(&sess_idr, conn->sess->session_index); - spin_unlock_bh(&sess_idr_lock); - } + + transport_free_session(conn->sess->se_sess); + ida_free(&sess_ida, conn->sess->session_index); kfree(conn->sess->sess_ops); kfree(conn->sess); conn->sess = NULL; diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index e072e955ce33..c53814539070 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -46,7 +46,7 @@ static int pty_limit = NR_UNIX98_PTY_DEFAULT; static int pty_reserve = NR_UNIX98_PTY_RESERVE; static int pty_limit_min; static int pty_limit_max = INT_MAX; -static int pty_count; +static atomic_t pty_count = ATOMIC_INIT(0); static struct ctl_table pty_table[] = { { @@ -93,8 +93,6 @@ static struct ctl_table pty_root_table[] = { {} }; -static DEFINE_MUTEX(allocated_ptys_lock); - struct pts_mount_opts { int setuid; int setgid; @@ -533,44 +531,25 @@ static struct file_system_type devpts_fs_type = { int devpts_new_index(struct pts_fs_info *fsi) { - int index; - int ida_ret; - -retry: - if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) - return -ENOMEM; - - mutex_lock(&allocated_ptys_lock); - if (pty_count >= (pty_limit - - (fsi->mount_opts.reserve ? 0 : pty_reserve))) { - mutex_unlock(&allocated_ptys_lock); - return -ENOSPC; - } + int index = -ENOSPC; - ida_ret = ida_get_new(&fsi->allocated_ptys, &index); - if (ida_ret < 0) { - mutex_unlock(&allocated_ptys_lock); - if (ida_ret == -EAGAIN) - goto retry; - return -EIO; - } + if (atomic_inc_return(&pty_count) >= (pty_limit - + (fsi->mount_opts.reserve ? 
0 : pty_reserve))) + goto out; - if (index >= fsi->mount_opts.max) { - ida_remove(&fsi->allocated_ptys, index); - mutex_unlock(&allocated_ptys_lock); - return -ENOSPC; - } - pty_count++; - mutex_unlock(&allocated_ptys_lock); + index = ida_alloc_max(&fsi->allocated_ptys, fsi->mount_opts.max - 1, + GFP_KERNEL); + +out: + if (index < 0) + atomic_dec(&pty_count); return index; } void devpts_kill_index(struct pts_fs_info *fsi, int idx) { - mutex_lock(&allocated_ptys_lock); - ida_remove(&fsi->allocated_ptys, idx); - pty_count--; - mutex_unlock(&allocated_ptys_lock); + ida_free(&fsi->allocated_ptys, idx); + atomic_dec(&pty_count); } /** diff --git a/fs/namespace.c b/fs/namespace.c index 725d6935fab9..99186556f8d3 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -61,9 +61,6 @@ __setup("mphash_entries=", set_mphash_entries); static u64 event; static DEFINE_IDA(mnt_id_ida); static DEFINE_IDA(mnt_group_ida); -static DEFINE_SPINLOCK(mnt_id_lock); -static int mnt_id_start = 0; -static int mnt_group_start = 1; static struct hlist_head *mount_hashtable __read_mostly; static struct hlist_head *mountpoint_hashtable __read_mostly; @@ -101,50 +98,30 @@ static inline struct hlist_head *mp_hash(struct dentry *dentry) static int mnt_alloc_id(struct mount *mnt) { - int res; + int res = ida_alloc(&mnt_id_ida, GFP_KERNEL); -retry: - ida_pre_get(&mnt_id_ida, GFP_KERNEL); - spin_lock(&mnt_id_lock); - res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id); - if (!res) - mnt_id_start = mnt->mnt_id + 1; - spin_unlock(&mnt_id_lock); - if (res == -EAGAIN) - goto retry; - - return res; + if (res < 0) + return res; + mnt->mnt_id = res; + return 0; } static void mnt_free_id(struct mount *mnt) { - int id = mnt->mnt_id; - spin_lock(&mnt_id_lock); - ida_remove(&mnt_id_ida, id); - if (mnt_id_start > id) - mnt_id_start = id; - spin_unlock(&mnt_id_lock); + ida_free(&mnt_id_ida, mnt->mnt_id); } /* * Allocate a new peer group ID - * - * mnt_group_ida is protected by namespace_sem */ static int mnt_alloc_group_id(struct mount *mnt) { - int res; + int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL); - if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL)) - return -ENOMEM; - - res = ida_get_new_above(&mnt_group_ida, - mnt_group_start, - &mnt->mnt_group_id); - if (!res) - mnt_group_start = mnt->mnt_group_id + 1; - - return res; + if (res < 0) + return res; + mnt->mnt_group_id = res; + return 0; } /* @@ -152,10 +129,7 @@ static int mnt_alloc_group_id(struct mount *mnt) */ void mnt_release_group_id(struct mount *mnt) { - int id = mnt->mnt_group_id; - ida_remove(&mnt_group_ida, id); - if (mnt_group_start > id) - mnt_group_start = id; + ida_free(&mnt_group_ida, mnt->mnt_group_id); mnt->mnt_group_id = 0; } diff --git a/fs/super.c b/fs/super.c index 7429588d6b49..f3a8c008e164 100644 --- a/fs/super.c +++ b/fs/super.c @@ -981,58 +981,42 @@ void emergency_thaw_all(void) } } -/* - * Unnamed block devices are dummy devices used by virtual - * filesystems which don't use real block-devices. -- jrs - */ - static DEFINE_IDA(unnamed_dev_ida); -static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */ -/* Many userspace utilities consider an FSID of 0 invalid. - * Always return at least 1 from get_anon_bdev. - */ -static int unnamed_dev_start = 1; +/** + * get_anon_bdev - Allocate a block device for filesystems which don't have one. + * @p: Pointer to a dev_t. + * + * Filesystems which don't use real block devices can call this function + * to allocate a virtual block device. + * + * Context: Any context. 
Frequently called while holding sb_lock. + * Return: 0 on success, -EMFILE if there are no anonymous bdevs left + * or -ENOMEM if memory allocation failed. + */ int get_anon_bdev(dev_t *p) { int dev; - int error; - retry: - if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0) - return -ENOMEM; - spin_lock(&unnamed_dev_lock); - error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev); - if (!error) - unnamed_dev_start = dev + 1; - spin_unlock(&unnamed_dev_lock); - if (error == -EAGAIN) - /* We raced and lost with another CPU. */ - goto retry; - else if (error) - return -EAGAIN; - - if (dev >= (1 << MINORBITS)) { - spin_lock(&unnamed_dev_lock); - ida_remove(&unnamed_dev_ida, dev); - if (unnamed_dev_start > dev) - unnamed_dev_start = dev; - spin_unlock(&unnamed_dev_lock); - return -EMFILE; - } - *p = MKDEV(0, dev & MINORMASK); + /* + * Many userspace utilities consider an FSID of 0 invalid. + * Always return at least 1 from get_anon_bdev. + */ + dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1, + GFP_ATOMIC); + if (dev == -ENOSPC) + dev = -EMFILE; + if (dev < 0) + return dev; + + *p = MKDEV(0, dev); return 0; } EXPORT_SYMBOL(get_anon_bdev); void free_anon_bdev(dev_t dev) { - int slot = MINOR(dev); - spin_lock(&unnamed_dev_lock); - ida_remove(&unnamed_dev_ida, slot); - if (slot < unnamed_dev_start) - unnamed_dev_start = slot; - spin_unlock(&unnamed_dev_lock); + ida_free(&unnamed_dev_ida, MINOR(dev)); } EXPORT_SYMBOL(free_anon_bdev); @@ -1040,7 +1024,6 @@ int set_anon_super(struct super_block *s, void *data) { return get_anon_bdev(&s->s_dev); } - EXPORT_SYMBOL(set_anon_super); void kill_anon_super(struct super_block *sb) @@ -1049,7 +1032,6 @@ void kill_anon_super(struct super_block *sb) generic_shutdown_super(sb); free_anon_bdev(dev); } - EXPORT_SYMBOL(kill_anon_super); void kill_litter_super(struct super_block *sb) @@ -1058,7 +1040,6 @@ void kill_litter_super(struct super_block *sb) d_genocide(sb->s_root); kill_anon_super(sb); } - EXPORT_SYMBOL(kill_litter_super); static int ns_test_super(struct super_block *sb, void *data) diff --git a/include/linux/idr.h b/include/linux/idr.h index 3e8215b2c371..3ec8628ce17f 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -236,34 +236,74 @@ struct ida { } #define DEFINE_IDA(name) struct ida name = IDA_INIT(name) -int ida_pre_get(struct ida *ida, gfp_t gfp_mask); -int ida_get_new_above(struct ida *ida, int starting_id, int *p_id); -void ida_remove(struct ida *ida, int id); +int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t); +void ida_free(struct ida *, unsigned int id); void ida_destroy(struct ida *ida); -int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, - gfp_t gfp_mask); -void ida_simple_remove(struct ida *ida, unsigned int id); +/** + * ida_alloc() - Allocate an unused ID. + * @ida: IDA handle. + * @gfp: Memory allocation flags. + * + * Allocate an ID between 0 and %INT_MAX, inclusive. + * + * Context: Any context. + * Return: The allocated ID, or %-ENOMEM if memory could not be allocated, + * or %-ENOSPC if there are no free IDs. + */ +static inline int ida_alloc(struct ida *ida, gfp_t gfp) +{ + return ida_alloc_range(ida, 0, ~0, gfp); +} -static inline void ida_init(struct ida *ida) +/** + * ida_alloc_min() - Allocate an unused ID. + * @ida: IDA handle. + * @min: Lowest ID to allocate. + * @gfp: Memory allocation flags. + * + * Allocate an ID between @min and %INT_MAX, inclusive. + * + * Context: Any context. 
+ * Return: The allocated ID, or %-ENOMEM if memory could not be allocated, + * or %-ENOSPC if there are no free IDs. + */ +static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp) { - INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT); + return ida_alloc_range(ida, min, ~0, gfp); } /** - * ida_get_new - allocate new ID - * @ida: idr handle - * @p_id: pointer to the allocated handle + * ida_alloc_max() - Allocate an unused ID. + * @ida: IDA handle. + * @max: Highest ID to allocate. + * @gfp: Memory allocation flags. + * + * Allocate an ID between 0 and @max, inclusive. * - * Simple wrapper around ida_get_new_above() w/ @starting_id of zero. + * Context: Any context. + * Return: The allocated ID, or %-ENOMEM if memory could not be allocated, + * or %-ENOSPC if there are no free IDs. */ -static inline int ida_get_new(struct ida *ida, int *p_id) +static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp) { - return ida_get_new_above(ida, 0, p_id); + return ida_alloc_range(ida, 0, max, gfp); } +static inline void ida_init(struct ida *ida) +{ + INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT); +} + +#define ida_simple_get(ida, start, end, gfp) \ + ida_alloc_range(ida, start, (end) - 1, gfp) +#define ida_simple_remove(ida, id) ida_free(ida, id) + static inline bool ida_is_empty(const struct ida *ida) { return radix_tree_empty(&ida->ida_rt); } + +/* in lib/radix-tree.c */ +int ida_pre_get(struct ida *ida, gfp_t gfp_mask); #endif /* __IDR_H__ */ diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 3589765141a8..613316724c6a 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1833,6 +1833,9 @@ config TEST_HASH This is intended to help people writing architecture-specific optimized versions. If unsure, say N. +config TEST_IDA + tristate "Perform selftest on IDA functions" + config TEST_PARMAN tristate "Perform selftest on priority array manager" depends on PARMAN diff --git a/lib/Makefile b/lib/Makefile index 9baefb6cb1a1..ca3f7ebb900d 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -50,6 +50,7 @@ obj-$(CONFIG_TEST_BPF) += test_bpf.o obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o +obj-$(CONFIG_TEST_IDA) += test_ida.o obj-$(CONFIG_TEST_KASAN) += test_kasan.o CFLAGS_test_kasan.o += -fno-builtin obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o diff --git a/lib/idr.c b/lib/idr.c index ed9c169c12bd..fab2fd5bc326 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -317,18 +317,12 @@ EXPORT_SYMBOL(idr_replace); * bit per ID, and so is more space efficient than an IDR. To use an IDA, * define it using DEFINE_IDA() (or embed a &struct ida in a data structure, * then initialise it using ida_init()). To allocate a new ID, call - * ida_simple_get(). To free an ID, call ida_simple_remove(). + * ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range(). + * To free an ID, call ida_free(). * - * If you have more complex locking requirements, use a loop around - * ida_pre_get() and ida_get_new() to allocate a new ID. Then use - * ida_remove() to free an ID. You must make sure that ida_get_new() and - * ida_remove() cannot be called at the same time as each other for the - * same IDA. - * - * You can also use ida_get_new_above() if you need an ID to be allocated - * above a particular number. ida_destroy() can be used to dispose of an - * IDA without needing to free the individual IDs in it. 
You can use - * ida_is_empty() to find out whether the IDA has any IDs currently allocated. + * ida_destroy() can be used to dispose of an IDA without needing to + * free the individual IDs in it. You can use ida_is_empty() to find + * out whether the IDA has any IDs currently allocated. * * IDs are currently limited to the range [0-INT_MAX]. If this is an awkward * limitation, it should be quite straightforward to raise the maximum. @@ -369,25 +363,7 @@ EXPORT_SYMBOL(idr_replace); #define IDA_MAX (0x80000000U / IDA_BITMAP_BITS - 1) -/** - * ida_get_new_above - allocate new ID above or equal to a start id - * @ida: ida handle - * @start: id to start search at - * @id: pointer to the allocated handle - * - * Allocate new ID above or equal to @start. It should be called - * with any required locks to ensure that concurrent calls to - * ida_get_new_above() / ida_get_new() / ida_remove() are not allowed. - * Consider using ida_simple_get() if you do not have complex locking - * requirements. - * - * If memory is required, it will return %-EAGAIN, you should unlock - * and go back to the ida_pre_get() call. If the ida is full, it will - * return %-ENOSPC. On success, it will return 0. - * - * @id returns a value in the range @start ... %0x7fffffff. - */ -int ida_get_new_above(struct ida *ida, int start, int *id) +static int ida_get_new_above(struct ida *ida, int start) { struct radix_tree_root *root = &ida->ida_rt; void __rcu **slot; @@ -426,8 +402,8 @@ int ida_get_new_above(struct ida *ida, int start, int *id) if (ebit < BITS_PER_LONG) { tmp |= 1UL << ebit; rcu_assign_pointer(*slot, (void *)tmp); - *id = new + ebit - RADIX_TREE_EXCEPTIONAL_SHIFT; - return 0; + return new + ebit - + RADIX_TREE_EXCEPTIONAL_SHIFT; } bitmap = this_cpu_xchg(ida_bitmap, NULL); if (!bitmap) @@ -458,8 +434,7 @@ int ida_get_new_above(struct ida *ida, int start, int *id) RADIX_TREE_EXCEPTIONAL_ENTRY); radix_tree_iter_replace(root, &iter, slot, bitmap); - *id = new; - return 0; + return new; } bitmap = this_cpu_xchg(ida_bitmap, NULL); if (!bitmap) @@ -468,20 +443,11 @@ int ida_get_new_above(struct ida *ida, int start, int *id) radix_tree_iter_replace(root, &iter, slot, bitmap); } - *id = new; - return 0; + return new; } } -EXPORT_SYMBOL(ida_get_new_above); -/** - * ida_remove - Free the given ID - * @ida: ida handle - * @id: ID to free - * - * This function should not be called at the same time as ida_get_new_above(). - */ -void ida_remove(struct ida *ida, int id) +static void ida_remove(struct ida *ida, int id) { unsigned long index = id / IDA_BITMAP_BITS; unsigned offset = id % IDA_BITMAP_BITS; @@ -518,99 +484,90 @@ void ida_remove(struct ida *ida, int id) } return; err: - WARN(1, "ida_remove called for id=%d which is not allocated.\n", id); + WARN(1, "ida_free called for id=%d which is not allocated.\n", id); } -EXPORT_SYMBOL(ida_remove); /** - * ida_destroy - Free the contents of an ida - * @ida: ida handle + * ida_destroy() - Free all IDs. + * @ida: IDA handle. + * + * Calling this function frees all IDs and releases all resources used + * by an IDA. When this call returns, the IDA is empty and can be reused + * or freed. If the IDA is already empty, there is no need to call this + * function. * - * Calling this function releases all resources associated with an IDA. When - * this call returns, the IDA is empty and can be reused or freed. The caller - * should not allow ida_remove() or ida_get_new_above() to be called at the - * same time. + * Context: Any context. 
*/ void ida_destroy(struct ida *ida) { + unsigned long flags; struct radix_tree_iter iter; void __rcu **slot; + xa_lock_irqsave(&ida->ida_rt, flags); radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) { struct ida_bitmap *bitmap = rcu_dereference_raw(*slot); if (!radix_tree_exception(bitmap)) kfree(bitmap); radix_tree_iter_delete(&ida->ida_rt, &iter, slot); } + xa_unlock_irqrestore(&ida->ida_rt, flags); } EXPORT_SYMBOL(ida_destroy); /** - * ida_simple_get - get a new id. - * @ida: the (initialized) ida. - * @start: the minimum id (inclusive, < 0x8000000) - * @end: the maximum id (exclusive, < 0x8000000 or 0) - * @gfp_mask: memory allocation flags - * - * Allocates an id in the range start <= id < end, or returns -ENOSPC. - * On memory allocation failure, returns -ENOMEM. + * ida_alloc_range() - Allocate an unused ID. + * @ida: IDA handle. + * @min: Lowest ID to allocate. + * @max: Highest ID to allocate. + * @gfp: Memory allocation flags. * - * Compared to ida_get_new_above() this function does its own locking, and - * should be used unless there are special requirements. + * Allocate an ID between @min and @max, inclusive. The allocated ID will + * not exceed %INT_MAX, even if @max is larger. * - * Use ida_simple_remove() to get rid of an id. + * Context: Any context. + * Return: The allocated ID, or %-ENOMEM if memory could not be allocated, + * or %-ENOSPC if there are no free IDs. */ -int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, - gfp_t gfp_mask) +int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max, + gfp_t gfp) { - int ret, id; - unsigned int max; + int id = 0; unsigned long flags; - BUG_ON((int)start < 0); - BUG_ON((int)end < 0); + if ((int)min < 0) + return -ENOSPC; - if (end == 0) - max = 0x80000000; - else { - BUG_ON(end < start); - max = end - 1; - } + if ((int)max < 0) + max = INT_MAX; again: - if (!ida_pre_get(ida, gfp_mask)) - return -ENOMEM; - xa_lock_irqsave(&ida->ida_rt, flags); - ret = ida_get_new_above(ida, start, &id); - if (!ret) { - if (id > max) { - ida_remove(ida, id); - ret = -ENOSPC; - } else { - ret = id; - } + id = ida_get_new_above(ida, min); + if (id > (int)max) { + ida_remove(ida, id); + id = -ENOSPC; } xa_unlock_irqrestore(&ida->ida_rt, flags); - if (unlikely(ret == -EAGAIN)) + if (unlikely(id == -EAGAIN)) { + if (!ida_pre_get(ida, gfp)) + return -ENOMEM; goto again; + } - return ret; + return id; } -EXPORT_SYMBOL(ida_simple_get); +EXPORT_SYMBOL(ida_alloc_range); /** - * ida_simple_remove - remove an allocated id. - * @ida: the (initialized) ida. - * @id: the id returned by ida_simple_get. - * - * Use to release an id allocated with ida_simple_get(). + * ida_free() - Release an allocated ID. + * @ida: IDA handle. + * @id: Previously allocated ID. * - * Compared to ida_remove() this function does its own locking, and should be - * used unless there are special requirements. + * Context: Any context. 
*/ -void ida_simple_remove(struct ida *ida, unsigned int id) +void ida_free(struct ida *ida, unsigned int id) { unsigned long flags; @@ -619,4 +576,4 @@ void ida_simple_remove(struct ida *ida, unsigned int id) ida_remove(ida, id); xa_unlock_irqrestore(&ida->ida_rt, flags); } -EXPORT_SYMBOL(ida_simple_remove); +EXPORT_SYMBOL(ida_free); diff --git a/lib/radix-tree.c b/lib/radix-tree.c index a9e41aed6de4..bc03ecc4dfd2 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -120,7 +120,7 @@ bool is_sibling_entry(const struct radix_tree_node *parent, void *node) static inline unsigned long get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot) { - return slot - parent->slots; + return parent ? slot - parent->slots : 0; } static unsigned int radix_tree_descend(const struct radix_tree_node *parent, @@ -2106,14 +2106,6 @@ void idr_preload(gfp_t gfp_mask) } EXPORT_SYMBOL(idr_preload); -/** - * ida_pre_get - reserve resources for ida allocation - * @ida: ida handle - * @gfp: memory allocation flags - * - * This function should be called before calling ida_get_new_above(). If it - * is unable to allocate memory, it will return %0. On success, it returns %1. - */ int ida_pre_get(struct ida *ida, gfp_t gfp) { /* @@ -2134,7 +2126,6 @@ int ida_pre_get(struct ida *ida, gfp_t gfp) return 1; } -EXPORT_SYMBOL(ida_pre_get); void __rcu **idr_get_free(struct radix_tree_root *root, struct radix_tree_iter *iter, gfp_t gfp, diff --git a/lib/test_ida.c b/lib/test_ida.c new file mode 100644 index 000000000000..2d1637d8136b --- /dev/null +++ b/lib/test_ida.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * test_ida.c: Test the IDA API + * Copyright (c) 2016-2018 Microsoft Corporation + * Copyright (c) 2018 Oracle Corporation + * Author: Matthew Wilcox <willy@infradead.org> + */ + +#include <linux/idr.h> +#include <linux/module.h> + +static unsigned int tests_run; +static unsigned int tests_passed; + +#ifdef __KERNEL__ +void ida_dump(struct ida *ida) { } +#endif +#define IDA_BUG_ON(ida, x) do { \ + tests_run++; \ + if (x) { \ + ida_dump(ida); \ + dump_stack(); \ + } else { \ + tests_passed++; \ + } \ +} while (0) + +/* + * Straightforward checks that allocating and freeing IDs work. + */ +static void ida_check_alloc(struct ida *ida) +{ + int i, id; + + for (i = 0; i < 10000; i++) + IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i); + + ida_free(ida, 20); + ida_free(ida, 21); + for (i = 0; i < 3; i++) { + id = ida_alloc(ida, GFP_KERNEL); + IDA_BUG_ON(ida, id < 0); + if (i == 2) + IDA_BUG_ON(ida, id != 10000); + } + + for (i = 0; i < 5000; i++) + ida_free(ida, i); + + IDA_BUG_ON(ida, ida_alloc_min(ida, 5000, GFP_KERNEL) != 10001); + ida_destroy(ida); + + IDA_BUG_ON(ida, !ida_is_empty(ida)); +} + +/* Destroy an IDA with a single entry at @base */ +static void ida_check_destroy_1(struct ida *ida, unsigned int base) +{ + IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) != base); + IDA_BUG_ON(ida, ida_is_empty(ida)); + ida_destroy(ida); + IDA_BUG_ON(ida, !ida_is_empty(ida)); +} + +/* Check that ida_destroy and ida_is_empty work */ +static void ida_check_destroy(struct ida *ida) +{ + /* Destroy an already-empty IDA */ + IDA_BUG_ON(ida, !ida_is_empty(ida)); + ida_destroy(ida); + IDA_BUG_ON(ida, !ida_is_empty(ida)); + + ida_check_destroy_1(ida, 0); + ida_check_destroy_1(ida, 1); + ida_check_destroy_1(ida, 1023); + ida_check_destroy_1(ida, 1024); + ida_check_destroy_1(ida, 12345678); +} + +/* + * Check what happens when we fill a leaf and then delete it. 
This may + * discover mishandling of IDR_FREE. + */ +static void ida_check_leaf(struct ida *ida, unsigned int base) +{ + unsigned long i; + + for (i = 0; i < IDA_BITMAP_BITS; i++) { + IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) != + base + i); + } + + ida_destroy(ida); + IDA_BUG_ON(ida, !ida_is_empty(ida)); + + IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != 0); + IDA_BUG_ON(ida, ida_is_empty(ida)); + ida_free(ida, 0); + IDA_BUG_ON(ida, !ida_is_empty(ida)); +} + +/* + * Check allocations up to and slightly above the maximum allowed (2^31-1) ID. + * Allocating up to 2^31-1 should succeed, and then allocating the next one + * should fail. + */ +static void ida_check_max(struct ida *ida) +{ + unsigned long i, j; + + for (j = 1; j < 65537; j *= 2) { + unsigned long base = (1UL << 31) - j; + for (i = 0; i < j; i++) { + IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) != + base + i); + } + IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) != + -ENOSPC); + ida_destroy(ida); + IDA_BUG_ON(ida, !ida_is_empty(ida)); + } +} + +/* + * Check handling of conversions between exceptional entries and full bitmaps. + */ +static void ida_check_conv(struct ida *ida) +{ + unsigned long i; + + for (i = 0; i < IDA_BITMAP_BITS * 2; i += IDA_BITMAP_BITS) { + IDA_BUG_ON(ida, ida_alloc_min(ida, i + 1, GFP_KERNEL) != i + 1); + IDA_BUG_ON(ida, ida_alloc_min(ida, i + BITS_PER_LONG, + GFP_KERNEL) != i + BITS_PER_LONG); + ida_free(ida, i + 1); + ida_free(ida, i + BITS_PER_LONG); + IDA_BUG_ON(ida, !ida_is_empty(ida)); + } + + for (i = 0; i < IDA_BITMAP_BITS * 2; i++) + IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i); + for (i = IDA_BITMAP_BITS * 2; i > 0; i--) + ida_free(ida, i - 1); + IDA_BUG_ON(ida, !ida_is_empty(ida)); + + for (i = 0; i < IDA_BITMAP_BITS + BITS_PER_LONG - 4; i++) + IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i); + for (i = IDA_BITMAP_BITS + BITS_PER_LONG - 4; i > 0; i--) + ida_free(ida, i - 1); + IDA_BUG_ON(ida, !ida_is_empty(ida)); +} + +static int ida_checks(void) +{ + DEFINE_IDA(ida); + + IDA_BUG_ON(&ida, !ida_is_empty(&ida)); + ida_check_alloc(&ida); + ida_check_destroy(&ida); + ida_check_leaf(&ida, 0); + ida_check_leaf(&ida, 1024); + ida_check_leaf(&ida, 1024 * 64); + ida_check_max(&ida); + ida_check_conv(&ida); + + printk("IDA: %u of %u tests passed\n", tests_passed, tests_run); + return (tests_run != tests_passed) ? 
0 : -EINVAL; +} + +static void ida_exit(void) +{ +} + +module_init(ida_checks); +module_exit(ida_exit); +MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>"); +MODULE_LICENSE("GPL"); diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 738871af5efa..670c84b1bfc2 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -1001,22 +1001,18 @@ static int register_pernet_operations(struct list_head *list, int error; if (ops->id) { -again: - error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID, ops->id); - if (error < 0) { - if (error == -EAGAIN) { - ida_pre_get(&net_generic_ids, GFP_KERNEL); - goto again; - } + error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID, + GFP_KERNEL); + if (error < 0) return error; - } + *ops->id = error; max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1); } error = __register_pernet_operations(list, ops); if (error) { rcu_barrier(); if (ops->id) - ida_remove(&net_generic_ids, *ops->id); + ida_free(&net_generic_ids, *ops->id); } return error; @@ -1027,7 +1023,7 @@ static void unregister_pernet_operations(struct pernet_operations *ops) __unregister_pernet_operations(ops); rcu_barrier(); if (ops->id) - ida_remove(&net_generic_ids, *ops->id); + ida_free(&net_generic_ids, *ops->id); } /** diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile index db66f8a0d4be..37baecc3766f 100644 --- a/tools/testing/radix-tree/Makefile +++ b/tools/testing/radix-tree/Makefile @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 -CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE -fsanitize=address -LDFLAGS += -fsanitize=address +CFLAGS += -I. -I../../include -g -Og -Wall -D_LGPL_SOURCE -fsanitize=address \ + -fsanitize=undefined +LDFLAGS += -fsanitize=address -fsanitize=undefined LDLIBS+= -lpthread -lurcu TARGETS = main idr-test multiorder CORE_OFILES := radix-tree.o idr.o linux.o test.o find_bit.o @@ -21,6 +22,7 @@ targets: generated/map-shift.h $(TARGETS) main: $(OFILES) +idr-test.o: ../../../lib/test_ida.c idr-test: idr-test.o $(CORE_OFILES) multiorder: multiorder.o $(CORE_OFILES) diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c index ee820fcc29b0..321ba92c70d2 100644 --- a/tools/testing/radix-tree/idr-test.c +++ b/tools/testing/radix-tree/idr-test.c @@ -309,141 +309,61 @@ void idr_checks(void) idr_u32_test(0); } +#define module_init(x) +#define module_exit(x) +#define MODULE_AUTHOR(x) +#define MODULE_LICENSE(x) +#define dump_stack() assert(0) +void ida_dump(struct ida *); + +#include "../../../lib/test_ida.c" + /* * Check that we get the correct error when we run out of memory doing - * allocations. To ensure we run out of memory, just "forget" to preload. + * allocations. In userspace, GFP_NOWAIT will always fail an allocation. * The first test is for not having a bitmap available, and the second test * is for not being able to allocate a level of the radix tree. */ void ida_check_nomem(void) { DEFINE_IDA(ida); - int id, err; - - err = ida_get_new_above(&ida, 256, &id); - assert(err == -EAGAIN); - err = ida_get_new_above(&ida, 1UL << 30, &id); - assert(err == -EAGAIN); -} - -/* - * Check what happens when we fill a leaf and then delete it. This may - * discover mishandling of IDR_FREE. 
- */ -void ida_check_leaf(void) -{ - DEFINE_IDA(ida); int id; - unsigned long i; - for (i = 0; i < IDA_BITMAP_BITS; i++) { - assert(ida_pre_get(&ida, GFP_KERNEL)); - assert(!ida_get_new(&ida, &id)); - assert(id == i); - } - - ida_destroy(&ida); - assert(ida_is_empty(&ida)); - - assert(ida_pre_get(&ida, GFP_KERNEL)); - assert(!ida_get_new(&ida, &id)); - assert(id == 0); - ida_destroy(&ida); - assert(ida_is_empty(&ida)); + id = ida_alloc_min(&ida, 256, GFP_NOWAIT); + IDA_BUG_ON(&ida, id != -ENOMEM); + id = ida_alloc_min(&ida, 1UL << 30, GFP_NOWAIT); + IDA_BUG_ON(&ida, id != -ENOMEM); + IDA_BUG_ON(&ida, !ida_is_empty(&ida)); } /* * Check handling of conversions between exceptional entries and full bitmaps. */ -void ida_check_conv(void) +void ida_check_conv_user(void) { DEFINE_IDA(ida); - int id; unsigned long i; - for (i = 0; i < IDA_BITMAP_BITS * 2; i += IDA_BITMAP_BITS) { - assert(ida_pre_get(&ida, GFP_KERNEL)); - assert(!ida_get_new_above(&ida, i + 1, &id)); - assert(id == i + 1); - assert(!ida_get_new_above(&ida, i + BITS_PER_LONG, &id)); - assert(id == i + BITS_PER_LONG); - ida_remove(&ida, i + 1); - ida_remove(&ida, i + BITS_PER_LONG); - assert(ida_is_empty(&ida)); - } - - assert(ida_pre_get(&ida, GFP_KERNEL)); - - for (i = 0; i < IDA_BITMAP_BITS * 2; i++) { - assert(ida_pre_get(&ida, GFP_KERNEL)); - assert(!ida_get_new(&ida, &id)); - assert(id == i); - } - - for (i = IDA_BITMAP_BITS * 2; i > 0; i--) { - ida_remove(&ida, i - 1); - } - assert(ida_is_empty(&ida)); - - for (i = 0; i < IDA_BITMAP_BITS + BITS_PER_LONG - 4; i++) { - assert(ida_pre_get(&ida, GFP_KERNEL)); - assert(!ida_get_new(&ida, &id)); - assert(id == i); - } - - for (i = IDA_BITMAP_BITS + BITS_PER_LONG - 4; i > 0; i--) { - ida_remove(&ida, i - 1); - } - assert(ida_is_empty(&ida)); - radix_tree_cpu_dead(1); for (i = 0; i < 1000000; i++) { - int err = ida_get_new(&ida, &id); - if (err == -EAGAIN) { - assert((i % IDA_BITMAP_BITS) == (BITS_PER_LONG - 2)); - assert(ida_pre_get(&ida, GFP_KERNEL)); - err = ida_get_new(&ida, &id); + int id = ida_alloc(&ida, GFP_NOWAIT); + if (id == -ENOMEM) { + IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) != + BITS_PER_LONG - 2); + id = ida_alloc(&ida, GFP_KERNEL); } else { - assert((i % IDA_BITMAP_BITS) != (BITS_PER_LONG - 2)); + IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) == + BITS_PER_LONG - 2); } - assert(!err); - assert(id == i); + IDA_BUG_ON(&ida, id != i); } ida_destroy(&ida); } -/* - * Check allocations up to and slightly above the maximum allowed (2^31-1) ID. - * Allocating up to 2^31-1 should succeed, and then allocating the next one - * should fail. 
- */ -void ida_check_max(void) -{ - DEFINE_IDA(ida); - int id, err; - unsigned long i, j; - - for (j = 1; j < 65537; j *= 2) { - unsigned long base = (1UL << 31) - j; - for (i = 0; i < j; i++) { - assert(ida_pre_get(&ida, GFP_KERNEL)); - assert(!ida_get_new_above(&ida, base, &id)); - assert(id == base + i); - } - assert(ida_pre_get(&ida, GFP_KERNEL)); - err = ida_get_new_above(&ida, base, &id); - assert(err == -ENOSPC); - ida_destroy(&ida); - assert(ida_is_empty(&ida)); - rcu_barrier(); - } -} - void ida_check_random(void) { DEFINE_IDA(ida); DECLARE_BITMAP(bitmap, 2048); - int id, err; unsigned int i; time_t s = time(NULL); @@ -454,15 +374,11 @@ void ida_check_random(void) int bit = i & 2047; if (test_bit(bit, bitmap)) { __clear_bit(bit, bitmap); - ida_remove(&ida, bit); + ida_free(&ida, bit); } else { __set_bit(bit, bitmap); - do { - ida_pre_get(&ida, GFP_KERNEL); - err = ida_get_new_above(&ida, bit, &id); - } while (err == -EAGAIN); - assert(!err); - assert(id == bit); + IDA_BUG_ON(&ida, ida_alloc_min(&ida, bit, GFP_KERNEL) + != bit); } } ida_destroy(&ida); @@ -488,71 +404,12 @@ void ida_simple_get_remove_test(void) ida_destroy(&ida); } -void ida_checks(void) +void user_ida_checks(void) { - DEFINE_IDA(ida); - int id; - unsigned long i; - radix_tree_cpu_dead(1); - ida_check_nomem(); - - for (i = 0; i < 10000; i++) { - assert(ida_pre_get(&ida, GFP_KERNEL)); - assert(!ida_get_new(&ida, &id)); - assert(id == i); - } - - ida_remove(&ida, 20); - ida_remove(&ida, 21); - for (i = 0; i < 3; i++) { - assert(ida_pre_get(&ida, GFP_KERNEL)); - assert(!ida_get_new(&ida, &id)); - if (i == 2) - assert(id == 10000); - } - - for (i = 0; i < 5000; i++) - ida_remove(&ida, i); - - assert(ida_pre_get(&ida, GFP_KERNEL)); - assert(!ida_get_new_above(&ida, 5000, &id)); - assert(id == 10001); - - ida_destroy(&ida); - - assert(ida_is_empty(&ida)); - assert(ida_pre_get(&ida, GFP_KERNEL)); - assert(!ida_get_new_above(&ida, 1, &id)); - assert(id == 1); - - ida_remove(&ida, id); - assert(ida_is_empty(&ida)); - ida_destroy(&ida); - assert(ida_is_empty(&ida)); - - assert(ida_pre_get(&ida, GFP_KERNEL)); - assert(!ida_get_new_above(&ida, 1, &id)); - ida_destroy(&ida); - assert(ida_is_empty(&ida)); - - assert(ida_pre_get(&ida, GFP_KERNEL)); - assert(!ida_get_new_above(&ida, 1, &id)); - assert(id == 1); - assert(ida_pre_get(&ida, GFP_KERNEL)); - assert(!ida_get_new_above(&ida, 1025, &id)); - assert(id == 1025); - assert(ida_pre_get(&ida, GFP_KERNEL)); - assert(!ida_get_new_above(&ida, 10000, &id)); - assert(id == 10000); - ida_remove(&ida, 1025); - ida_destroy(&ida); - assert(ida_is_empty(&ida)); - - ida_check_leaf(); - ida_check_max(); - ida_check_conv(); + ida_check_nomem(); + ida_check_conv_user(); ida_check_random(); ida_simple_get_remove_test(); @@ -582,12 +439,19 @@ void ida_thread_tests(void) pthread_join(threads[i], NULL); } +void ida_tests(void) +{ + user_ida_checks(); + ida_checks(); + ida_exit(); + ida_thread_tests(); +} + int __weak main(void) { radix_tree_init(); idr_checks(); - ida_checks(); - ida_thread_tests(); + ida_tests(); radix_tree_cpu_dead(1); rcu_barrier(); if (nr_allocated) diff --git a/tools/testing/radix-tree/linux/xarray.h b/tools/testing/radix-tree/linux/xarray.h new file mode 100644 index 000000000000..df3812cda376 --- /dev/null +++ b/tools/testing/radix-tree/linux/xarray.h @@ -0,0 +1,2 @@ +#include "generated/map-shift.h" +#include "../../../../include/linux/xarray.h" diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c index 257f3f8aacaa..b741686e53d6 100644 --- 
a/tools/testing/radix-tree/main.c +++ b/tools/testing/radix-tree/main.c @@ -27,20 +27,22 @@ void __gang_check(unsigned long middle, long down, long up, int chunk, int hop) item_check_present(&tree, middle + idx); item_check_absent(&tree, middle + up); - item_gang_check_present(&tree, middle - down, - up + down, chunk, hop); - item_full_scan(&tree, middle - down, down + up, chunk); + if (chunk > 0) { + item_gang_check_present(&tree, middle - down, up + down, + chunk, hop); + item_full_scan(&tree, middle - down, down + up, chunk); + } item_kill_tree(&tree); } void gang_check(void) { - __gang_check(1 << 30, 128, 128, 35, 2); - __gang_check(1 << 31, 128, 128, 32, 32); - __gang_check(1 << 31, 128, 128, 32, 100); - __gang_check(1 << 31, 128, 128, 17, 7); - __gang_check(0xffff0000, 0, 65536, 17, 7); - __gang_check(0xfffffffe, 1, 1, 17, 7); + __gang_check(1UL << 30, 128, 128, 35, 2); + __gang_check(1UL << 31, 128, 128, 32, 32); + __gang_check(1UL << 31, 128, 128, 32, 100); + __gang_check(1UL << 31, 128, 128, 17, 7); + __gang_check(0xffff0000UL, 0, 65536, 17, 7); + __gang_check(0xfffffffeUL, 1, 1, 17, 7); } void __big_gang_check(void) @@ -322,7 +324,7 @@ static void single_thread_tests(bool long_run) printv(2, "after dynamic_height_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); idr_checks(); - ida_checks(); + ida_tests(); rcu_barrier(); printv(2, "after idr_checks: %d allocated, preempt %d\n", nr_allocated, preempt_count); @@ -369,7 +371,6 @@ int main(int argc, char **argv) iteration_test(0, 10 + 90 * long_run); iteration_test(7, 10 + 90 * long_run); single_thread_tests(long_run); - ida_thread_tests(); /* Free any remaining preallocated nodes */ radix_tree_cpu_dead(0); diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h index 31f1d9b6f506..92d901eacf49 100644 --- a/tools/testing/radix-tree/test.h +++ b/tools/testing/radix-tree/test.h @@ -39,8 +39,7 @@ void multiorder_checks(void); void iteration_test(unsigned order, unsigned duration); void benchmark(void); void idr_checks(void); -void ida_checks(void); -void ida_thread_tests(void); +void ida_tests(void); struct item * item_tag_set(struct radix_tree_root *root, unsigned long index, int tag); |