author      Daniel Axtens <dja@axtens.net>             2015-08-14 17:41:19 +1000
committer   Michael Ellerman <mpe@ellerman.id.au>      2015-08-14 21:32:04 +1000
commit      05155772f642a5b0e421adf2017f993759bf7f95
tree        d8acd2b562297cedb54001af3c40b22ee11b0547   /drivers/misc/cxl/native.c
parent      0b3f9c757cabad4b8101c5fcddddd029ed5506a6
cxl: Allocate and release the SPA with the AFU
Previously the SPA was allocated and freed upon entering and leaving
AFU-directed mode. This causes problems for error recovery: contexts
hold a pointer into the SPA, and they may persist after the AFU has
been detached.
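To make the hazard concrete, here is a minimal userspace sketch (the toy_* names are hypothetical illustrations, not the driver's structures): a context latches a pointer into the SPA backing buffer, so freeing that buffer on detach leaves the context dangling.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the cxl structures; illustration only. */
struct spa_entry { unsigned long state; };

struct toy_afu {
	struct spa_entry *spa;		/* SPA backing buffer */
};

struct toy_context {
	struct spa_entry *slot;		/* points into afu->spa */
};

int main(void)
{
	struct toy_afu afu = { .spa = calloc(4, sizeof(struct spa_entry)) };
	struct toy_context ctx;

	if (!afu.spa)
		return 1;
	ctx.slot = &afu.spa[1];		/* context latches a pointer into the SPA */

	/* Old behaviour: the SPA was freed on leaving AFU-directed mode... */
	free(afu.spa);
	afu.spa = NULL;

	/* ...but the context outlives the detach and still points into the
	 * freed buffer, so any later access is a use-after-free. */
	printf("context still references freed SPA slot %p\n", (void *)ctx.slot);
	return 0;
}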
We would ideally like to allocate the SPA when the AFU is allocated, and
not release it until the AFU is released. However, we don't know how big
the SPA needs to be until we read the AFU descriptor.
Therefore, restructure the code:
- Allocate the SPA only once, on the first attach.
- Release the SPA only when the entire AFU is being released (not
detached). Guard the release with a NULL check, so we don't free
it if it was never allocated (e.g. dedicated mode); see the sketch
below.
Acked-by: Cyril Bur <cyrilbur@gmail.com>
Signed-off-by: Daniel Axtens <dja@axtens.net>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'drivers/misc/cxl/native.c')
-rw-r--r--   drivers/misc/cxl/native.c | 33
1 file changed, 22 insertions(+), 11 deletions(-)
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 44568dd68bb9..b37f2e8004f5 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -183,10 +183,8 @@ static int spa_max_procs(int spa_size)
 	return ((spa_size / 8) - 96) / 17;
 }
 
-static int alloc_spa(struct cxl_afu *afu)
+int cxl_alloc_spa(struct cxl_afu *afu)
 {
-	u64 spap;
-
 	/* Work out how many pages to allocate */
 	afu->spa_order = 0;
 	do {
@@ -205,6 +203,13 @@ static int alloc_spa(struct cxl_afu *afu)
 	pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
 		 1<<afu->spa_order, afu->spa_max_procs, afu->num_procs);
 
+	return 0;
+}
+
+static void attach_spa(struct cxl_afu *afu)
+{
+	u64 spap;
+
 	afu->sw_command_status = (__be64 *)((char *)afu->spa +
 					    ((afu->spa_max_procs + 3) * 128));
 
@@ -213,14 +218,19 @@ static int alloc_spa(struct cxl_afu *afu)
 	spap |= CXL_PSL_SPAP_V;
 	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", afu->spa, afu->spa_max_procs, afu->sw_command_status, spap);
 	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
-
-	return 0;
 }
 
-static void release_spa(struct cxl_afu *afu)
+static inline void detach_spa(struct cxl_afu *afu)
 {
 	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
-	free_pages((unsigned long) afu->spa, afu->spa_order);
+}
+
+void cxl_release_spa(struct cxl_afu *afu)
+{
+	if (afu->spa) {
+		free_pages((unsigned long) afu->spa, afu->spa_order);
+		afu->spa = NULL;
+	}
 }
 
 int cxl_tlb_slb_invalidate(struct cxl *adapter)
@@ -447,8 +457,11 @@ static int activate_afu_directed(struct cxl_afu *afu)
 
 	dev_info(&afu->dev, "Activating AFU directed mode\n");
 
-	if (alloc_spa(afu))
-		return -ENOMEM;
+	if (afu->spa == NULL) {
+		if (cxl_alloc_spa(afu))
+			return -ENOMEM;
+	}
+	attach_spa(afu);
 
 	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
 	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
@@ -559,8 +572,6 @@ static int deactivate_afu_directed(struct cxl_afu *afu)
 	cxl_afu_disable(afu);
 	cxl_psl_purge(afu);
 
-	release_spa(afu);
-
 	return 0;
 }