From: Christoph Hellwig

Sort out the locking mess in spu_base and document the current rules.
As an added benefit spu_alloc* and spu_free don't block anymore.

Signed-off-by: Christoph Hellwig
Signed-off-by: Arnd Bergmann

Index: linux-2.6/arch/powerpc/platforms/cell/spu_base.c
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spu_base.c
+++ linux-2.6/arch/powerpc/platforms/cell/spu_base.c
@@ -41,12 +41,31 @@ const struct spu_management_ops *spu_man
 EXPORT_SYMBOL_GPL(spu_management_ops);
 
 const struct spu_priv1_ops *spu_priv1_ops;
+EXPORT_SYMBOL_GPL(spu_priv1_ops);
+
+struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
+EXPORT_SYMBOL_GPL(cbe_spu_info);
+
+/*
+ * Protects cbe_spu_info and spu->number.
+ */
+static DEFINE_SPINLOCK(spu_lock);
+
+/*
+ * List of all spus in the system.
+ *
+ * This list is iterated by callers from irq context and callers that
+ * want to sleep.  Thus modifications need to be done with both
+ * spu_full_list_lock and spu_full_list_mutex held, while iterating
+ * through it requires either of these locks.
+ *
+ * In addition spu_full_list_lock protects all assignments to
+ * spu->mm.
+ */
 static LIST_HEAD(spu_full_list);
-static DEFINE_MUTEX(spu_mutex);
-static spinlock_t spu_list_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(spu_full_list_lock);
+static DEFINE_MUTEX(spu_full_list_mutex);
 
-EXPORT_SYMBOL_GPL(spu_priv1_ops);
 
 void spu_invalidate_slbs(struct spu *spu)
 {
@@ -65,12 +84,12 @@ void spu_flush_all_slbs(struct mm_struct
 	struct spu *spu;
 	unsigned long flags;
 
-	spin_lock_irqsave(&spu_list_lock, flags);
+	spin_lock_irqsave(&spu_full_list_lock, flags);
 	list_for_each_entry(spu, &spu_full_list, full_list) {
 		if (spu->mm == mm)
 			spu_invalidate_slbs(spu);
 	}
-	spin_unlock_irqrestore(&spu_list_lock, flags);
+	spin_unlock_irqrestore(&spu_full_list_lock, flags);
 }
 
 /* The hack below stinks... try to do something better one of
@@ -88,9 +107,9 @@ void spu_associate_mm(struct spu *spu, s
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&spu_list_lock, flags);
+	spin_lock_irqsave(&spu_full_list_lock, flags);
 	spu->mm = mm;
-	spin_unlock_irqrestore(&spu_list_lock, flags);
+	spin_unlock_irqrestore(&spu_full_list_lock, flags);
 	if (mm)
 		mm_needs_global_tlbie(mm);
 }
@@ -428,7 +447,7 @@ struct spu *spu_alloc_spu(struct spu *re
 {
 	struct spu *spu, *ret = NULL;
 
-	mutex_lock(&spu_mutex);
+	spin_lock(&spu_lock);
 	list_for_each_entry(spu, &cbe_spu_info[req_spu->node].free_spus, list) {
 		if (spu == req_spu) {
 			list_del_init(&spu->list);
@@ -438,7 +457,7 @@ struct spu *spu_alloc_spu(struct spu *re
 			break;
 		}
 	}
-	mutex_unlock(&spu_mutex);
+	spin_unlock(&spu_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(spu_alloc_spu);
@@ -447,14 +466,14 @@ struct spu *spu_alloc_node(int node)
 {
 	struct spu *spu = NULL;
 
-	mutex_lock(&spu_mutex);
+	spin_lock(&spu_lock);
 	if (!list_empty(&cbe_spu_info[node].free_spus)) {
 		spu = list_entry(cbe_spu_info[node].free_spus.next,
 					struct spu, list);
 		list_del_init(&spu->list);
 		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
 	}
-	mutex_unlock(&spu_mutex);
+	spin_unlock(&spu_lock);
 
 	if (spu)
 		spu_init_channels(spu);
@@ -478,9 +497,9 @@ struct spu *spu_alloc(void)
 
 void spu_free(struct spu *spu)
 {
-	mutex_lock(&spu_mutex);
+	spin_lock(&spu_lock);
 	list_add_tail(&spu->list, &cbe_spu_info[spu->node].free_spus);
-	mutex_unlock(&spu_mutex);
+	spin_unlock(&spu_lock);
 }
 EXPORT_SYMBOL_GPL(spu_free);
 
@@ -491,12 +510,12 @@ struct sysdev_class spu_sysdev_class = {
 int spu_add_sysdev_attr(struct sysdev_attribute *attr)
 {
 	struct spu *spu;
-	mutex_lock(&spu_mutex);
 
+	mutex_lock(&spu_full_list_mutex);
 	list_for_each_entry(spu, &spu_full_list, full_list)
 		sysdev_create_file(&spu->sysdev, attr);
+	mutex_unlock(&spu_full_list_mutex);
 
-	mutex_unlock(&spu_mutex);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
@@ -504,12 +523,12 @@ EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
 int spu_add_sysdev_attr_group(struct attribute_group *attrs)
 {
 	struct spu *spu;
-	mutex_lock(&spu_mutex);
 
+	mutex_lock(&spu_full_list_mutex);
 	list_for_each_entry(spu, &spu_full_list, full_list)
 		sysfs_create_group(&spu->sysdev.kobj, attrs);
+	mutex_unlock(&spu_full_list_mutex);
 
-	mutex_unlock(&spu_mutex);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
@@ -518,24 +537,22 @@ EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_gr
 void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
 {
 	struct spu *spu;
-	mutex_lock(&spu_mutex);
 
+	mutex_lock(&spu_full_list_mutex);
 	list_for_each_entry(spu, &spu_full_list, full_list)
 		sysdev_remove_file(&spu->sysdev, attr);
-
-	mutex_unlock(&spu_mutex);
+	mutex_unlock(&spu_full_list_mutex);
 }
 EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);
 
 void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
 {
 	struct spu *spu;
-	mutex_lock(&spu_mutex);
 
+	mutex_lock(&spu_full_list_mutex);
 	list_for_each_entry(spu, &spu_full_list, full_list)
 		sysfs_remove_group(&spu->sysdev.kobj, attrs);
-
-	mutex_unlock(&spu_mutex);
+	mutex_unlock(&spu_full_list_mutex);
 }
 EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);
 
@@ -570,9 +587,9 @@ static int __init create_spu(void *data)
 		goto out;
 
 	spin_lock_init(&spu->register_lock);
-	mutex_lock(&spu_mutex);
+	spin_lock(&spu_lock);
 	spu->number = number++;
-	mutex_unlock(&spu_mutex);
+	spin_unlock(&spu_lock);
 
 	ret = spu_create_spu(spu, data);
 
@@ -589,14 +606,17 @@ static int __init create_spu(void *data)
 	if (ret)
 		goto out_free_irqs;
 
-	mutex_lock(&spu_mutex);
-	spin_lock_irqsave(&spu_list_lock, flags);
+	spin_lock(&spu_lock);
 	list_add(&spu->list, &cbe_spu_info[spu->node].free_spus);
 	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
 	cbe_spu_info[spu->node].n_spus++;
+	spin_unlock(&spu_lock);
+
+	mutex_lock(&spu_full_list_mutex);
+	spin_lock_irqsave(&spu_full_list_lock, flags);
 	list_add(&spu->full_list, &spu_full_list);
-	spin_unlock_irqrestore(&spu_list_lock, flags);
-	mutex_unlock(&spu_mutex);
+	spin_unlock_irqrestore(&spu_full_list_lock, flags);
+	mutex_unlock(&spu_full_list_mutex);
 
 	INIT_LIST_HEAD(&spu->aff_list);
 
@@ -612,9 +632,6 @@ out:
 	return ret;
 }
 
-struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
-EXPORT_SYMBOL_GPL(cbe_spu_info);
-
 /* Hardcoded affinity idxs for QS20 */
 #define SPES_PER_BE 8
 static int QS20_reg_idxs[SPES_PER_BE] = { 0, 2, 4, 6, 7, 5, 3, 1 };
@@ -772,7 +789,9 @@ static int __init init_spu_base(void)
 		goto out_unregister_sysdev_class;
 	}
 
+	mutex_lock(&spu_full_list_mutex);
 	xmon_register_spus(&spu_full_list);
+	mutex_unlock(&spu_full_list_mutex);
 
 	if (of_has_vicinity()) {
 		init_aff_fw_vicinity();
--
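
To make the documented rules easier to follow, here is a minimal illustrative
sketch (not part of the patch) of how the spu_full_list locking described in
the new comment composes.  The example_* functions are made up for
illustration, as if they lived in spu_base.c; spu_full_list,
spu_full_list_lock, spu_full_list_mutex and the full_list member are the ones
introduced or renamed by the patch above.

/* A walker that may sleep takes only the mutex. */
static void example_walk_may_sleep(void)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		/* sleeping operations (e.g. sysfs file creation) are fine here */
	}
	mutex_unlock(&spu_full_list_mutex);
}

/* A walker in irq context takes only the spinlock, irqsave variant. */
static void example_walk_atomic(void)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		/* must not sleep here */
	}
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/*
 * A writer takes both locks, so that either kind of walker above is
 * excluded while the list is being modified.
 */
static void example_add(struct spu *spu)
{
	unsigned long flags;

	mutex_lock(&spu_full_list_mutex);
	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	mutex_unlock(&spu_full_list_mutex);
}

The per-node free lists are a separate case: they are only touched from
process context in short, non-sleeping critical sections, which is why
spu_alloc*, spu_free and create_spu can switch from spu_mutex to the new
spu_lock spinlock and stop blocking.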