From 453852f7452444e3c8aafedc3878b360342b8332 Mon Sep 17 00:00:00 2001 From: Alexia Ingerson Date: Thu, 12 Oct 2023 09:43:50 -0700 Subject: [PATCH 01/13] prov/shm: simplify headers so everything is included in smr.h Signed-off-by: Alexia Ingerson --- prov/shm/Makefile.include | 1 - prov/shm/src/smr.h | 74 ++++++++++++++++++++++++++----------- prov/shm/src/smr_atomic.c | 6 --- prov/shm/src/smr_comp.c | 5 --- prov/shm/src/smr_cq.c | 3 -- prov/shm/src/smr_domain.c | 3 -- prov/shm/src/smr_dsa.c | 11 ------ prov/shm/src/smr_dsa.h | 66 --------------------------------- prov/shm/src/smr_ep.c | 13 +------ prov/shm/src/smr_fabric.c | 3 -- prov/shm/src/smr_init.c | 6 --- prov/shm/src/smr_msg.c | 5 --- prov/shm/src/smr_progress.c | 11 ------ prov/shm/src/smr_rma.c | 6 --- prov/shm/src/smr_signal.h | 3 +- prov/shm/src/smr_util.c | 13 ------- prov/shm/src/smr_util.h | 17 --------- 17 files changed, 55 insertions(+), 191 deletions(-) delete mode 100644 prov/shm/src/smr_dsa.h diff --git a/prov/shm/Makefile.include b/prov/shm/Makefile.include index df1dc4f8885..a8d893435a4 100644 --- a/prov/shm/Makefile.include +++ b/prov/shm/Makefile.include @@ -15,7 +15,6 @@ _shm_files = \ prov/shm/src/smr_av.c \ prov/shm/src/smr_signal.h \ prov/shm/src/smr.h \ - prov/shm/src/smr_dsa.h \ prov/shm/src/smr_dsa.c \ prov/shm/src/smr_util.h \ prov/shm/src/smr_util.c diff --git a/prov/shm/src/smr.h b/prov/shm/src/smr.h index d4287fdba88..12581517b91 100644 --- a/prov/shm/src/smr.h +++ b/prov/shm/src/smr.h @@ -30,17 +30,28 @@ * SOFTWARE. */ +#ifndef _SMR_H_ +#define _SMR_H_ + #if HAVE_CONFIG_H # include #endif /* HAVE_CONFIG_H */ -#include -#include +#include #include -#include +#include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include -#include #include #include #include @@ -50,28 +61,34 @@ #include #include #include -#include +#include #include +#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "ofi.h" +#include "ofi_atom.h" +#include "ofi_atomic.h" +#include "ofi_atomic_queue.h" +#include "ofi_enosys.h" +#include "ofi_epoll.h" +#include "ofi_hmem.h" +#include "ofi_iov.h" +#include "ofi_list.h" +#include "ofi_lock.h" +#include "ofi_mb.h" +#include "ofi_mem.h" +#include "ofi_mr.h" +#include "ofi_proto.h" +#include "ofi_prov.h" +#include "ofi_rbuf.h" +#include "ofi_shm_p2p.h" +#include "ofi_signal.h" +#include "ofi_tree.h" +#include "ofi_util.h" +#include "ofi_xpmem.h" #include "smr_util.h" -#ifndef _SMR_H_ -#define _SMR_H_ - struct smr_env { size_t sar_threshold; int disable_cma; @@ -365,4 +382,19 @@ static inline void smr_progress_ipc_list_noop(struct smr_ep *ep) // noop } +/* SMR FUNCTIONS FOR DSA SUPPORT */ +void smr_dsa_init(void); +void smr_dsa_cleanup(void); +size_t smr_dsa_copy_to_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, + struct smr_resp *resp, struct smr_cmd *cmd, + const struct iovec *iov, size_t count, size_t *bytes_done, + void *entry_ptr); +size_t smr_dsa_copy_from_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, + struct smr_resp *resp, struct smr_cmd *cmd, + const struct iovec *iov, size_t count, size_t *bytes_done, + void *entry_ptr); +void smr_dsa_context_init(struct smr_ep *ep); +void smr_dsa_context_cleanup(struct smr_ep *ep); +void smr_dsa_progress(struct smr_ep *ep); + #endif diff --git a/prov/shm/src/smr_atomic.c b/prov/shm/src/smr_atomic.c index a3c326ec6d6..79478b2bf9e 100644 --- a/prov/shm/src/smr_atomic.c +++ 
b/prov/shm/src/smr_atomic.c @@ -30,14 +30,8 @@ * SOFTWARE. */ -#include -#include -#include - -#include "ofi_iov.h" #include "smr.h" - static void smr_format_rma_ioc(struct smr_cmd *cmd, const struct fi_rma_ioc *rma_ioc, size_t ioc_count) { diff --git a/prov/shm/src/smr_comp.c b/prov/shm/src/smr_comp.c index c8e9b3a38db..b50e787fcd6 100644 --- a/prov/shm/src/smr_comp.c +++ b/prov/shm/src/smr_comp.c @@ -30,11 +30,6 @@ * SOFTWARE. */ -#include -#include -#include - -#include "ofi_iov.h" #include "smr.h" int smr_complete_tx(struct smr_ep *ep, void *context, uint32_t op, diff --git a/prov/shm/src/smr_cq.c b/prov/shm/src/smr_cq.c index 4e567863ff6..0d345bfa312 100644 --- a/prov/shm/src/smr_cq.c +++ b/prov/shm/src/smr_cq.c @@ -30,9 +30,6 @@ * SOFTWARE. */ -#include -#include - #include "smr.h" int smr_cq_open(struct fid_domain *domain, struct fi_cq_attr *attr, diff --git a/prov/shm/src/smr_domain.c b/prov/shm/src/smr_domain.c index 909298fbc2d..a5ff16c401d 100644 --- a/prov/shm/src/smr_domain.c +++ b/prov/shm/src/smr_domain.c @@ -30,9 +30,6 @@ * SOFTWARE. */ -#include -#include - #include "smr.h" extern struct fi_ops_srx_peer smr_srx_peer_ops; diff --git a/prov/shm/src/smr_dsa.c b/prov/shm/src/smr_dsa.c index daa91cbeb4e..328f7117b5c 100644 --- a/prov/shm/src/smr_dsa.c +++ b/prov/shm/src/smr_dsa.c @@ -39,21 +39,10 @@ #if SHM_HAVE_DSA #include -#include -#include -#include -#include -#include -#include -#include -#include #include #include #include #include // _mm_pause -#include "smr_util.h" -#include "smr_dsa.h" - #define MAX_WQS_PER_EP 4 #define GENCAP_CACHE_CTRL_MEM 0x4 #define LIMITED_MSIX_PORTAL_OFFSET 0x1000 diff --git a/prov/shm/src/smr_dsa.h b/prov/shm/src/smr_dsa.h deleted file mode 100644 index c1a637eebed..00000000000 --- a/prov/shm/src/smr_dsa.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (c) 2022 Intel Corporation. All rights reserved - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#ifndef _DSA_SHM_H_ -#define _DSA_SHM_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#if HAVE_CONFIG_H -# include -#endif /* HAVE_CONFIG_H */ - -#include -#include -#include "smr.h" - -/* SMR FUNCTIONS FOR DSA SUPPORT */ -void smr_dsa_init(void); -void smr_dsa_cleanup(void); -size_t smr_dsa_copy_to_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, - struct smr_resp *resp, struct smr_cmd *cmd, - const struct iovec *iov, size_t count, size_t *bytes_done, - void *entry_ptr); -size_t smr_dsa_copy_from_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, - struct smr_resp *resp, struct smr_cmd *cmd, - const struct iovec *iov, size_t count, size_t *bytes_done, - void *entry_ptr); -void smr_dsa_context_init(struct smr_ep *ep); -void smr_dsa_context_cleanup(struct smr_ep *ep); -void smr_dsa_progress(struct smr_ep *ep); - -#ifdef __cplusplus -} -#endif -#endif /* _DSA_SHM_H_ */ diff --git a/prov/shm/src/smr_ep.c b/prov/shm/src/smr_ep.c index 82bc95200b1..dd07a452429 100644 --- a/prov/shm/src/smr_ep.c +++ b/prov/shm/src/smr_ep.c @@ -30,19 +30,8 @@ * SOFTWARE. */ -#include -#include -#include -#include - -#include "ofi_iov.h" -#include "ofi_hmem.h" -#include "ofi_mr.h" -#include "ofi_mb.h" -#include "smr_signal.h" #include "smr.h" -#include "smr_dsa.h" -#include "ofi_xpmem.h" +#include "smr_signal.h" extern struct fi_ops_msg smr_msg_ops, smr_no_recv_msg_ops; extern struct fi_ops_tagged smr_tag_ops, smr_no_recv_tag_ops; diff --git a/prov/shm/src/smr_fabric.c b/prov/shm/src/smr_fabric.c index 74fe97cd909..854f52b1440 100644 --- a/prov/shm/src/smr_fabric.c +++ b/prov/shm/src/smr_fabric.c @@ -30,9 +30,6 @@ * SOFTWARE. */ -#include -#include - #include "smr.h" static int smr_wait_open(struct fid_fabric *fabric_fid, diff --git a/prov/shm/src/smr_init.c b/prov/shm/src/smr_init.c index 3f2c6656637..076384a7ba6 100644 --- a/prov/shm/src/smr_init.c +++ b/prov/shm/src/smr_init.c @@ -30,13 +30,7 @@ * SOFTWARE. */ -#include - -#include #include "smr.h" -#include "smr_signal.h" -#include "smr_dsa.h" -#include struct sigaction *old_action = NULL; diff --git a/prov/shm/src/smr_msg.c b/prov/shm/src/smr_msg.c index 0a34ae637ff..8adfa7d11e8 100644 --- a/prov/shm/src/smr_msg.c +++ b/prov/shm/src/smr_msg.c @@ -31,11 +31,6 @@ * SOFTWARE. */ -#include -#include -#include - -#include "ofi_iov.h" #include "smr.h" static ssize_t smr_recvmsg(struct fid_ep *ep_fid, const struct fi_msg *msg, diff --git a/prov/shm/src/smr_progress.c b/prov/shm/src/smr_progress.c index 3932e404c15..e91035a50d9 100644 --- a/prov/shm/src/smr_progress.c +++ b/prov/shm/src/smr_progress.c @@ -30,18 +30,7 @@ * SOFTWARE. */ -#include -#include -#include - -#include "ofi_iov.h" -#include "ofi_hmem.h" -#include "ofi_atom.h" -#include "ofi_mb.h" -#include "ofi_mr.h" -#include "ofi_shm_p2p.h" #include "smr.h" -#include "smr_dsa.h" static inline void smr_try_progress_to_sar(struct smr_ep *ep, struct smr_region *smr, diff --git a/prov/shm/src/smr_rma.c b/prov/shm/src/smr_rma.c index feeae75c1b7..c12f6cdd89d 100644 --- a/prov/shm/src/smr_rma.c +++ b/prov/shm/src/smr_rma.c @@ -31,12 +31,6 @@ * SOFTWARE. 
*/ -#include -#include -#include - -#include "ofi_iov.h" -#include "ofi_shm_p2p.h" #include "smr.h" static void smr_add_rma_cmd(struct smr_region *peer_smr, diff --git a/prov/shm/src/smr_signal.h b/prov/shm/src/smr_signal.h index 164a96714d8..563a64bfaf1 100644 --- a/prov/shm/src/smr_signal.h +++ b/prov/shm/src/smr_signal.h @@ -34,8 +34,7 @@ #ifndef _SMR_SIGNAL_H_ #define _SMR_SIGNAL_H_ -#include -#include "smr_util.h" + #include "smr.h" extern struct sigaction *old_action; diff --git a/prov/shm/src/smr_util.c b/prov/shm/src/smr_util.c index 0372c7e0597..fd67ecb6244 100644 --- a/prov/shm/src/smr_util.c +++ b/prov/shm/src/smr_util.c @@ -30,19 +30,6 @@ * SOFTWARE. */ -#include "config.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "smr_util.h" #include "smr.h" struct dlist_entry ep_name_list; diff --git a/prov/shm/src/smr_util.h b/prov/shm/src/smr_util.h index 7ed4e1e426f..407736f06eb 100644 --- a/prov/shm/src/smr_util.h +++ b/prov/shm/src/smr_util.h @@ -33,23 +33,6 @@ #ifndef _OFI_SHM_H_ #define _OFI_SHM_H_ -#include "config.h" - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - #ifdef __cplusplus extern "C" { #endif From cca1bb756f4dc1d94e427756a2dd3cda99f84eb5 Mon Sep 17 00:00:00 2001 From: Alexia Ingerson Date: Wed, 11 Dec 2024 15:50:28 -0800 Subject: [PATCH 02/13] include/ofi_mem: add function to return number of free elements in freestack Signed-off-by: Alexia Ingerson --- include/ofi_mem.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/ofi_mem.h b/include/ofi_mem.h index 7463a025675..a0f42092392 100644 --- a/include/ofi_mem.h +++ b/include/ofi_mem.h @@ -318,6 +318,12 @@ static inline void* smr_freestack_pop(struct smr_freestack *fs) { return (void *) ( ((char*)fs) + smr_freestack_pop_by_offset(fs) ); } + +static inline int16_t smr_freestack_avail(struct smr_freestack *fs) +{ + return fs->free; +} + /* * Buffer Pool */ From d032d6dd97b6abcb979a14e77f28368cb2218e3b Mon Sep 17 00:00:00 2001 From: Alexia Ingerson Date: Fri, 1 Nov 2024 14:07:12 -0700 Subject: [PATCH 03/13] include/ofi.h: add compile time static assert Signed-off-by: Alexia Ingerson --- include/ofi.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/ofi.h b/include/ofi.h index 9661a7553d9..cde486efb5f 100644 --- a/include/ofi.h +++ b/include/ofi.h @@ -274,6 +274,8 @@ static inline int ofi_val32_ge(uint32_t x, uint32_t y) { #define IFFLAGSTRN2(flags, SYMVAL, SYMNAME, N) \ do { if (flags & SYMVAL) ofi_strncatf(buf, N, #SYMNAME ", "); } while(0) +#define STATIC_ASSERT(cond, name) \ + typedef char static_assertion_##name[(cond) ? 1 : -1] /* * CPU specific features From 7fa5affe188f52ca907f5c48abc7ddbcc5d18caf Mon Sep 17 00:00:00 2001 From: Alexia Ingerson Date: Thu, 19 Dec 2024 14:07:02 -0800 Subject: [PATCH 04/13] prov/shm: new shm Turn response queue into return queue for local commands Inline commands are still receive side All commands have an inline option but a common ptr to the command being used for remote commands. 
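For example, on the send side a non-inline command is now carved out
of the sender's own command stack and advertised to the peer by
pointer. A sketch assembled from the send-path changes in this patch
(surrounding command-queue slot handling elided):

	cmd = smr_freestack_pop(smr_cmd_stack(ep->region));
	assert(cmd);
	ce->ptr = smr_local_to_peer(ep, id, peer_id, (uintptr_t) cmd);
	...
	smr_cmd_queue_commit(ce, pos);
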
These commands have to be returned to the sender but the receive side can hold onto them as long as needed for the lifetime of the message Signed-off-by: Alexia Ingerson --- prov/shm/Makefile.include | 1 - prov/shm/src/smr.h | 521 +++++++++++---- prov/shm/src/smr_atomic.c | 173 ++--- prov/shm/src/smr_attr.c | 4 +- prov/shm/src/smr_av.c | 47 +- prov/shm/src/smr_cntr.c | 2 +- prov/shm/src/smr_comp.c | 7 +- prov/shm/src/smr_cq.c | 2 +- prov/shm/src/smr_domain.c | 11 +- prov/shm/src/smr_dsa.c | 68 +- prov/shm/src/smr_ep.c | 741 +++++++-------------- prov/shm/src/smr_fabric.c | 2 +- prov/shm/src/smr_init.c | 19 +- prov/shm/src/smr_msg.c | 95 ++- prov/shm/src/smr_progress.c | 1207 ++++++++++++++++------------------- prov/shm/src/smr_rma.c | 115 ++-- prov/shm/src/smr_signal.h | 10 +- prov/shm/src/smr_util.c | 156 ++--- prov/shm/src/smr_util.h | 360 ----------- 19 files changed, 1564 insertions(+), 1977 deletions(-) delete mode 100644 prov/shm/src/smr_util.h diff --git a/prov/shm/Makefile.include b/prov/shm/Makefile.include index a8d893435a4..eef57cc9905 100644 --- a/prov/shm/Makefile.include +++ b/prov/shm/Makefile.include @@ -16,7 +16,6 @@ _shm_files = \ prov/shm/src/smr_signal.h \ prov/shm/src/smr.h \ prov/shm/src/smr_dsa.c \ - prov/shm/src/smr_util.h \ prov/shm/src/smr_util.c diff --git a/prov/shm/src/smr.h b/prov/shm/src/smr.h index 12581517b91..41b619f6dda 100644 --- a/prov/shm/src/smr.h +++ b/prov/shm/src/smr.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2021 Intel Corporation, Inc. All rights reserved. + * Copyright (c) Intel Corporation, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -87,7 +87,361 @@ #include "ofi_util.h" #include "ofi_xpmem.h" -#include "smr_util.h" +#define SMR_VERSION 9 + +#define SMR_FLAG_ATOMIC (1 << 0) +#define SMR_FLAG_DEBUG (1 << 1) +#define SMR_FLAG_HMEM_ENABLED (1 << 3) + +//shm region defines +#define SMR_CMD_SIZE 440 /* align with 64-byte cache line */ + +//reserves 0-255 for defined ops and room for new ops +//256 and beyond reserved for ctrl ops +#define SMR_OP_MAX (1 << 8) + +#define SMR_REMOTE_CQ_DATA (1 << 0) + +/* SMR op_src: Specifies data source location */ +enum { + smr_proto_inline, /* command data */ + smr_proto_inject, /* inject buffers */ + smr_proto_iov, /* reference iovec via CMA */ + smr_proto_sar, /* segmentation fallback protocol */ + smr_proto_ipc, /* device IPC handle protocol */ + smr_proto_max, +}; + +/* CMA/XPMEM capability. Generic acronym used: + * VMA: Virtual Memory Address */ +enum { + SMR_VMA_CAP_NA, + SMR_VMA_CAP_ON, + SMR_VMA_CAP_OFF, +}; + +/* + * Unique smr_op_hdr for smr message protocol: + * entry - for internal use managing commands (must be kept first) + * tx_ctx - source side context (unused by target side) + * rx_ctx - target side context (unused by source side) + * id - local shm_id of peer sending msg (for shm lookup) + * op - type of op (ex. ofi_op_msg, defined in ofi_proto.h) + * proto - msg src (ex. smr_src_inline, defined above) + * op_flags - operation flags (ex. 
SMR_REMOTE_CQ_DATA, defined above) + * size - size of data transfer + * proto_data - src of additional protocol data (inject offset) + * status - returned status of operation + * cq_data - remote CQ data + */ +struct smr_cmd_hdr { + uint64_t entry; + uint64_t tx_ctx; + uint64_t rx_ctx; + int64_t id; + uint32_t op; + uint16_t proto; + uint16_t op_flags; + + uint64_t size; + uint64_t proto_data; + int64_t status; + uint64_t cq_data; + union { + uint64_t tag; + struct { + uint8_t datatype; + uint8_t atomic_op; + }; + }; +} __attribute__ ((aligned(16))); + +#define SMR_BUF_BATCH_MAX 64 +#define SMR_MSG_DATA_LEN (SMR_CMD_SIZE - \ + (sizeof(struct smr_cmd_hdr) + \ + sizeof(struct smr_cmd_rma))) +#define SMR_IOV_LIMIT 4 + +struct smr_cmd_rma { + uint64_t rma_count; + union { + struct fi_rma_iov rma_iov[SMR_IOV_LIMIT]; + struct fi_rma_ioc rma_ioc[SMR_IOV_LIMIT]; + }; +}; + +struct smr_cmd_data { + union { + uint8_t msg[SMR_MSG_DATA_LEN]; + struct { + size_t iov_count; + struct iovec iov[SMR_IOV_LIMIT]; + }; + struct { + uint32_t buf_batch_size; + int16_t sar[SMR_BUF_BATCH_MAX]; + }; + struct ipc_info ipc_info; + }; +}; +STATIC_ASSERT(sizeof(struct smr_cmd_data) == SMR_MSG_DATA_LEN, smr_cmd_size); + +struct smr_cmd { + struct smr_cmd_hdr hdr; + struct smr_cmd_data data; + struct smr_cmd_rma rma; +}; + +#define SMR_INJECT_SIZE (1 << 12) //4096 +#define SMR_COMP_INJECT_SIZE (SMR_INJECT_SIZE / 2) +#define SMR_SAR_SIZE (1 << 15) //32768 + +#define SMR_DIR "/dev/shm/" +#define SMR_NAME_MAX 256 +#define SMR_PATH_MAX (SMR_NAME_MAX + sizeof(SMR_DIR)) + +struct smr_peer_data { + int64_t id; + uint32_t sar_status; + uint16_t name_sent; + uint16_t ipc_valid; + uintptr_t local_region; + struct ofi_xpmem_client xpmem; +}; + +extern struct dlist_entry ep_name_list; +extern pthread_mutex_t ep_list_lock; + +struct smr_region; + +struct smr_ep_name { + char name[SMR_NAME_MAX]; + struct smr_region *region; + struct dlist_entry entry; +}; + +static inline const char *smr_no_prefix(const char *addr) +{ + char *start; + + return (start = strstr(addr, "://")) ? 
start + 3 : addr; +} + +struct smr_peer { + char name[SMR_NAME_MAX]; + bool id_assigned; + fi_addr_t fiaddr; + struct smr_region *region; + int pid_fd; +}; + +#define SMR_MAX_PEERS 256 + +struct smr_map { + ofi_spin_t lock; + int64_t cur_id; + int num_peers; + uint16_t flags; + struct ofi_rbmap rbmap; + struct smr_peer peers[SMR_MAX_PEERS]; +}; + +struct smr_region { + uint8_t version; + uint8_t resv; + uint16_t flags; + int pid; + uint8_t cma_cap_peer; + uint8_t cma_cap_self; + uint8_t xpmem_cap_self; + uint8_t resv2; + + uint32_t max_sar_buf_per_peer; + struct ofi_xpmem_pinfo xpmem_self; + struct ofi_xpmem_pinfo xpmem_peer; + void *base_addr; + + char name[SMR_NAME_MAX]; + + size_t total_size; + + /* offsets from start of smr_region */ + size_t cmd_queue_offset; + size_t cmd_stack_offset; + size_t inject_pool_offset; + size_t ret_queue_offset; + size_t sar_pool_offset; + size_t peer_data_offset; +}; + +struct smr_inject_buf { + union { + uint8_t data[SMR_INJECT_SIZE]; + struct { + uint8_t buf[SMR_COMP_INJECT_SIZE]; + uint8_t comp[SMR_COMP_INJECT_SIZE]; + }; + }; +}; + +struct smr_sar_buf { + uint8_t buf[SMR_SAR_SIZE]; +}; + +struct smr_cmd_entry { + uintptr_t ptr; + struct smr_cmd cmd; +}; + +//temporary wrapper until I get it right +struct smr_return_entry { + uintptr_t ptr; +}; + +/* Queue of offsets of the command blocks obtained from the command pool + * freestack + */ +OFI_DECLARE_ATOMIC_Q(struct smr_cmd_entry, smr_cmd_queue); +OFI_DECLARE_ATOMIC_Q(struct smr_return_entry, smr_return_queue); + +struct smr_ep { + struct util_ep util_ep; + size_t tx_size; + size_t rx_size; + const char *name; + uint64_t msg_id; + struct smr_region *volatile region; + struct fid_peer_srx *srx; + struct ofi_bufpool *cmd_ctx_pool; + struct ofi_bufpool *unexp_buf_pool; + struct ofi_bufpool *pend_buf_pool; + + struct smr_tx_fs *tx_fs; + struct slist overflow_list; + struct dlist_entry ipc_cpy_pend_list; + size_t min_multi_recv_size; + + int ep_idx; + enum ofi_shm_p2p_type p2p_type; + void *dsa_context; + void (*smr_progress_ipc_list)(struct smr_ep *ep); +}; + +struct smr_av { + struct util_av util_av; + struct smr_map smr_map; + size_t used; +}; + +static inline struct smr_region *smr_peer_region(struct smr_ep *ep, int i) +{ + return container_of(ep->util_ep.av, struct smr_av, util_av)-> + smr_map.peers[i].region; +} +static inline struct smr_cmd_queue *smr_cmd_queue(struct smr_region *smr) +{ + return (struct smr_cmd_queue *) ((char *) smr + smr->cmd_queue_offset); +} +static inline struct smr_freestack *smr_cmd_stack(struct smr_region *smr) +{ + return (struct smr_freestack *) ((char *) smr + smr->cmd_stack_offset); +} +static inline struct smr_freestack *smr_inject_pool(struct smr_region *smr) +{ + return (struct smr_freestack *) ((char *) smr + smr->inject_pool_offset); +} +static inline struct smr_return_queue *smr_return_queue(struct smr_region *smr) +{ + return (struct smr_return_queue *) ((char *) smr + smr->ret_queue_offset); +} +static inline struct smr_peer_data *smr_peer_data(struct smr_region *smr) +{ + return (struct smr_peer_data *) ((char *) smr + smr->peer_data_offset); +} +static inline struct smr_freestack *smr_sar_pool(struct smr_region *smr) +{ + return (struct smr_freestack *) ((char *) smr + smr->sar_pool_offset); +} + +struct smr_attr { + const char *name; + size_t rx_count; + size_t tx_count; + uint16_t flags; +}; +size_t smr_calculate_size_offsets(size_t tx_count, size_t rx_count, + size_t *cmd_offset, size_t *cs_offset, + size_t *inject_offset, size_t *rq_offset, + size_t 
*sar_offset, size_t *peer_offset); +void smr_cma_check(struct smr_region *region, + struct smr_region *peer_region); +void smr_cleanup(void); +int smr_map_to_region(const struct fi_provider *prov, struct smr_map *map, + int64_t id); +void smr_map_to_endpoint(struct smr_ep *ep, int64_t id); +void smr_unmap_region(const struct fi_provider *prov, struct smr_map *map, + int64_t id, bool found); +void smr_unmap_from_endpoint(struct smr_ep *ep, int64_t id); +void smr_exchange_all_peers(struct smr_ep *ep); +int smr_map_add(const struct fi_provider *prov, struct smr_map *map, + const char *name, int64_t *id); +void smr_map_del(struct smr_map *map, int64_t id); + +struct smr_region *smr_map_get(struct smr_map *map, int64_t id); + +int smr_create(const struct fi_provider *prov, struct smr_map *map, + const struct smr_attr *attr, struct smr_region *volatile *smr); +void smr_free(struct smr_region *smr); + +static inline uintptr_t smr_local_to_peer(struct smr_ep *ep, + int64_t id, int64_t peer_id, + uintptr_t local_ptr) +{ + struct smr_region *peer_smr = smr_peer_region(ep, id); + uint64_t offset = local_ptr - (uintptr_t) ep->region; + + return smr_peer_data(peer_smr)[peer_id].local_region + offset; +} + +static inline uintptr_t smr_peer_to_peer(struct smr_ep *ep, + int64_t id, uintptr_t local_ptr) +{ + struct smr_region *peer_smr = smr_peer_region(ep, id); + uint64_t offset = local_ptr - (uintptr_t) peer_smr; + + return (uintptr_t) peer_smr->base_addr + offset; +} + +static inline uintptr_t smr_peer_to_owner(struct smr_ep *ep, + int64_t id, uintptr_t local_ptr) +{ + struct smr_region *peer_smr = smr_peer_region(ep, id); + uint64_t offset = local_ptr - (uintptr_t) peer_smr; + + return (uintptr_t) peer_smr->base_addr + offset; +} + +static inline void smr_return_cmd(struct smr_ep *ep, struct smr_cmd *cmd) +{ + struct smr_region *peer_smr = smr_peer_region(ep, cmd->hdr.id); + uintptr_t peer_ptr = smr_peer_to_owner(ep, cmd->hdr.id, (uintptr_t) cmd); + int64_t pos; + struct smr_return_entry *queue_entry; + int ret; + + ret = smr_return_queue_next(smr_return_queue(peer_smr), &queue_entry, &pos); + if (ret == -FI_ENOENT) { + //return queue runs in parallel to command stack + //ie we will never run out of space + assert(0); + } + + assert(peer_ptr >= (uintptr_t) peer_smr->base_addr && + peer_ptr < (uintptr_t) peer_smr->base_addr + peer_smr->total_size); + queue_entry->ptr = peer_ptr; + + smr_return_queue_commit(queue_entry, pos); +} struct smr_env { size_t sar_threshold; @@ -104,13 +458,7 @@ extern struct util_prov smr_util_prov; extern int smr_global_ep_idx; //protected by the ep_list_lock int smr_fabric(struct fi_fabric_attr *attr, struct fid_fabric **fabric, - void *context); - -struct smr_av { - struct util_av util_av; - struct smr_map smr_map; - size_t used; -}; + void *context); static inline int64_t smr_addr_lookup(struct util_av *av, fi_addr_t fiaddr) { @@ -118,7 +466,7 @@ static inline int64_t smr_addr_lookup(struct util_av *av, fi_addr_t fiaddr) } int smr_domain_open(struct fid_fabric *fabric, struct fi_info *info, - struct fid_domain **dom, void *context); + struct fid_domain **dom, void *context); int smr_eq_open(struct fid_fabric *fabric, struct fi_eq_attr *attr, struct fid_eq **eq, void *context); @@ -127,26 +475,24 @@ int smr_av_open(struct fid_domain *domain, struct fi_av_attr *attr, struct fid_av **av, void *context); int smr_query_atomic(struct fid_domain *domain, enum fi_datatype datatype, - enum fi_op op, struct fi_atomic_attr *attr, uint64_t flags); - -#define SMR_IOV_LIMIT 4 + enum 
fi_op op, struct fi_atomic_attr *attr, + uint64_t flags); struct smr_tx_entry { - struct smr_cmd cmd; - int64_t peer_id; - void *context; - struct iovec iov[SMR_IOV_LIMIT]; - uint32_t iov_count; - uint64_t op_flags; - size_t bytes_done; - void *map_ptr; - struct smr_ep_name *map_name; - struct ofi_mr *mr[SMR_IOV_LIMIT]; + int64_t peer_id; + void *context; + struct iovec iov[SMR_IOV_LIMIT]; + uint32_t iov_count; + uint64_t op_flags; + size_t bytes_done; + void *map_ptr; + struct smr_ep_name *map_name; + struct ofi_mr *mr[SMR_IOV_LIMIT]; }; struct smr_pend_entry { struct dlist_entry entry; - struct smr_cmd cmd; + struct smr_cmd *cmd; struct fi_peer_rx_entry *rx_entry; struct smr_cmd_ctx *cmd_ctx; size_t bytes_done; @@ -160,8 +506,9 @@ struct smr_pend_entry { struct smr_cmd_ctx { struct dlist_entry entry; struct smr_ep *ep; - struct smr_cmd cmd; - struct smr_pend_entry *sar_entry; + struct smr_cmd *cmd; + struct smr_cmd cmd_cpy; + char msg[SMR_MSG_DATA_LEN]; struct slist buf_list; }; @@ -199,62 +546,11 @@ static inline void *smr_get_ptr(void *base, uint64_t offset) return (char *) base + (uintptr_t) offset; } -struct smr_sock_name { - char name[SMR_SOCK_NAME_MAX]; - struct dlist_entry entry; -}; - -enum smr_cmap_state { - SMR_CMAP_INIT = 0, - SMR_CMAP_SUCCESS, - SMR_CMAP_FAILED, -}; - -struct smr_cmap_entry { - enum smr_cmap_state state; - int *device_fds; -}; - -struct smr_sock_info { - char name[SMR_SOCK_NAME_MAX]; - int listen_sock; - ofi_epoll_t epollfd; - struct fd_signal signal; - pthread_t listener_thread; - int *my_fds; - int nfds; - struct smr_cmap_entry peers[SMR_MAX_PEERS]; -}; - struct smr_unexp_buf { struct slist_entry entry; char buf[SMR_SAR_SIZE]; }; -struct smr_ep { - struct util_ep util_ep; - size_t tx_size; - size_t rx_size; - const char *name; - uint64_t msg_id; - struct smr_region *volatile region; - struct fid_peer_srx *srx; - struct ofi_bufpool *cmd_ctx_pool; - struct ofi_bufpool *unexp_buf_pool; - struct ofi_bufpool *pend_buf_pool; - - struct smr_tx_fs *tx_fs; - struct dlist_entry sar_list; - struct dlist_entry ipc_cpy_pend_list; - size_t min_multi_recv_size; - - int ep_idx; - enum ofi_shm_p2p_type p2p_type; - struct smr_sock_info *sock_info; - void *dsa_context; - void (*smr_progress_ipc_list)(struct smr_ep *ep); -}; - #define smr_ep_rx_flags(smr_ep) ((smr_ep)->util_ep.rx_op_flags) #define smr_ep_tx_flags(smr_ep) ((smr_ep)->util_ep.tx_op_flags) @@ -266,8 +562,7 @@ static inline int smr_mmap_name(char *shm_name, const char *ep_name, } int smr_endpoint(struct fid_domain *domain, struct fi_info *info, - struct fid_ep **ep, void *context); -void smr_ep_exchange_fds(struct smr_ep *ep, int64_t id); + struct fid_ep **ep, void *context); int smr_cq_open(struct fid_domain *domain, struct fi_cq_attr *attr, struct fid_cq **cq_fid, void *context); @@ -276,29 +571,29 @@ int smr_cntr_open(struct fid_domain *domain, struct fi_cntr_attr *attr, int64_t smr_verify_peer(struct smr_ep *ep, fi_addr_t fi_addr); -void smr_format_pend_resp(struct smr_tx_entry *pend, struct smr_cmd *cmd, - void *context, struct ofi_mr **mr, - const struct iovec *iov, uint32_t iov_count, - uint64_t op_flags, int64_t id, struct smr_resp *resp); +void smr_format_pend(struct smr_tx_entry *pend, void *context, + struct ofi_mr **mr, const struct iovec *iov, + uint32_t iov_count, uint64_t op_flags, int64_t id); void smr_generic_format(struct smr_cmd *cmd, int64_t peer_id, uint32_t op, uint64_t tag, uint64_t data, uint64_t op_flags); -size_t smr_copy_to_sar(struct smr_freestack *sar_pool, struct smr_resp *resp, 
+size_t smr_copy_to_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, struct smr_cmd *cmd, struct ofi_mr **mr, const struct iovec *iov, size_t count, size_t *bytes_done); -size_t smr_copy_from_sar(struct smr_freestack *sar_pool, struct smr_resp *resp, +size_t smr_copy_from_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, struct smr_cmd *cmd, struct ofi_mr **mr, const struct iovec *iov, size_t count, size_t *bytes_done); int smr_select_proto(void **desc, size_t iov_count, bool cma_avail, bool ipc_valid, uint32_t op, uint64_t total_len, uint64_t op_flags); -typedef ssize_t (*smr_proto_func)(struct smr_ep *ep, struct smr_region *peer_smr, +typedef ssize_t (*smr_proto_func)( + struct smr_ep *ep, struct smr_region *peer_smr, int64_t id, int64_t peer_id, uint32_t op, uint64_t tag, uint64_t data, uint64_t op_flags, struct ofi_mr **desc, const struct iovec *iov, size_t iov_count, size_t total_len, void *context, struct smr_cmd *cmd); -extern smr_proto_func smr_proto_ops[smr_src_max]; +extern smr_proto_func smr_proto_ops[smr_proto_max]; int smr_write_err_comp(struct util_cq *cq, void *context, uint64_t flags, uint64_t tag, int err); @@ -328,13 +623,17 @@ static inline bool smr_vma_enabled(struct smr_ep *ep, peer_smr->xpmem_cap_self == SMR_VMA_CAP_ON); } -static inline void smr_set_ipc_valid(struct smr_region *region, uint64_t id) +static inline void smr_set_ipc_valid(struct smr_ep *ep, uint64_t id) { + struct smr_av *av; + + av = container_of(ep->util_ep.av, struct smr_av, util_av); + if (ofi_hmem_is_initialized(FI_HMEM_ZE) && - region->map->peers[id].pid_fd == -1) - smr_peer_data(region)[id].ipc_valid = 0; + av->smr_map.peers[id].pid_fd == -1) + smr_peer_data(ep->region)[id].ipc_valid = 0; else - smr_peer_data(region)[id].ipc_valid = 1; + smr_peer_data(ep->region)[id].ipc_valid = 1; } static inline bool smr_ipc_valid(struct smr_ep *ep, struct smr_region *peer_smr, @@ -344,36 +643,6 @@ static inline bool smr_ipc_valid(struct smr_ep *ep, struct smr_region *peer_smr, smr_peer_data(peer_smr)[peer_id].ipc_valid); } -static inline bool smr_ze_ipc_enabled(struct smr_region *smr, - struct smr_region *peer_smr) -{ - return (smr->flags & SMR_FLAG_IPC_SOCK) && - (peer_smr->flags & SMR_FLAG_IPC_SOCK); -} - -static inline struct smr_inject_buf * -smr_get_txbuf(struct smr_region *smr) -{ - struct smr_inject_buf *txbuf; - - pthread_spin_lock(&smr->lock); - if (!smr_freestack_isempty(smr_inject_pool(smr))) - txbuf = smr_freestack_pop(smr_inject_pool(smr)); - else - txbuf = NULL; - pthread_spin_unlock(&smr->lock); - return txbuf; -} - -static inline void -smr_release_txbuf(struct smr_region *smr, - struct smr_inject_buf *tx_buf) -{ - pthread_spin_lock(&smr->lock); - smr_freestack_push(smr_inject_pool(smr), tx_buf); - pthread_spin_unlock(&smr->lock); -} - int smr_unexp_start(struct fi_peer_rx_entry *rx_entry); void smr_progress_ipc_list(struct smr_ep *ep); @@ -386,13 +655,11 @@ static inline void smr_progress_ipc_list_noop(struct smr_ep *ep) void smr_dsa_init(void); void smr_dsa_cleanup(void); size_t smr_dsa_copy_to_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, - struct smr_resp *resp, struct smr_cmd *cmd, - const struct iovec *iov, size_t count, size_t *bytes_done, - void *entry_ptr); + struct smr_cmd *cmd, const struct iovec *iov, + size_t count, size_t *bytes_done); size_t smr_dsa_copy_from_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, - struct smr_resp *resp, struct smr_cmd *cmd, - const struct iovec *iov, size_t count, size_t *bytes_done, - void *entry_ptr); + struct smr_cmd *cmd, 
const struct iovec *iov, + size_t count, size_t *bytes_done); void smr_dsa_context_init(struct smr_ep *ep); void smr_dsa_context_cleanup(struct smr_ep *ep); void smr_dsa_progress(struct smr_ep *ep); diff --git a/prov/shm/src/smr_atomic.c b/prov/shm/src/smr_atomic.c index 79478b2bf9e..24c1f5dd090 100644 --- a/prov/shm/src/smr_atomic.c +++ b/prov/shm/src/smr_atomic.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2018 Intel Corporation. All rights reserved + * Copyright (c) Intel Corporation. All rights reserved * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -32,7 +32,8 @@ #include "smr.h" -static void smr_format_rma_ioc(struct smr_cmd *cmd, const struct fi_rma_ioc *rma_ioc, +static void smr_format_rma_ioc(struct smr_cmd *cmd, + const struct fi_rma_ioc *rma_ioc, size_t ioc_count) { cmd->rma.rma_count = ioc_count; @@ -42,18 +43,17 @@ static void smr_format_rma_ioc(struct smr_cmd *cmd, const struct fi_rma_ioc *rma static void smr_generic_atomic_format(struct smr_cmd *cmd, uint8_t datatype, uint8_t atomic_op) { - cmd->msg.hdr.datatype = datatype; - cmd->msg.hdr.atomic_op = atomic_op; + cmd->hdr.datatype = datatype; + cmd->hdr.atomic_op = atomic_op; } static void smr_format_inline_atomic(struct smr_cmd *cmd, struct ofi_mr **mr, const struct iovec *iov, size_t count) { - cmd->msg.hdr.op_src = smr_src_inline; - - cmd->msg.hdr.size = ofi_copy_from_mr_iov(cmd->msg.data.msg, - SMR_MSG_DATA_LEN, mr, - iov, count, 0); + cmd->hdr.proto = smr_proto_inline; + cmd->hdr.tx_ctx = 0; + cmd->hdr.size = ofi_copy_from_mr_iov(cmd->data.msg, SMR_MSG_DATA_LEN, + mr, iov, count, 0); } static void smr_do_atomic_inline(struct smr_ep *ep, struct smr_region *peer_smr, @@ -76,30 +76,31 @@ static void smr_format_inject_atomic(struct smr_cmd *cmd, struct ofi_mr **desc, { size_t comp_size; - cmd->msg.hdr.op_src = smr_src_inject; - cmd->msg.hdr.src_data = smr_get_offset(smr, tx_buf); + cmd->hdr.proto = smr_proto_inject; + cmd->hdr.proto_data = smr_get_offset(smr, tx_buf); - switch (cmd->msg.hdr.op) { + switch (cmd->hdr.op) { case ofi_op_atomic: - cmd->msg.hdr.size = ofi_copy_from_mr_iov(tx_buf->data, + cmd->hdr.size = ofi_copy_from_mr_iov(tx_buf->data, SMR_INJECT_SIZE, desc, iov, count, 0); break; case ofi_op_atomic_fetch: - if (cmd->msg.hdr.atomic_op == FI_ATOMIC_READ) - cmd->msg.hdr.size = ofi_total_iov_len(resultv, result_count); + if (cmd->hdr.atomic_op == FI_ATOMIC_READ) + cmd->hdr.size = ofi_total_iov_len(resultv, + result_count); else - cmd->msg.hdr.size = ofi_copy_from_mr_iov(tx_buf->data, + cmd->hdr.size = ofi_copy_from_mr_iov(tx_buf->data, SMR_INJECT_SIZE, desc, iov, count, 0); break; case ofi_op_atomic_compare: - cmd->msg.hdr.size = ofi_copy_from_mr_iov(tx_buf->buf, + cmd->hdr.size = ofi_copy_from_mr_iov(tx_buf->buf, SMR_COMP_INJECT_SIZE, desc, iov, count, 0); comp_size = ofi_copy_from_mr_iov(tx_buf->comp, SMR_COMP_INJECT_SIZE, comp_desc, compv, comp_count, 0); - if (comp_size != cmd->msg.hdr.size) + if (comp_size != cmd->hdr.size) FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "atomic and compare buffer size mismatch\n"); break; @@ -109,7 +110,8 @@ static void smr_format_inject_atomic(struct smr_cmd *cmd, struct ofi_mr **desc, } } -static ssize_t smr_do_atomic_inject(struct smr_ep *ep, struct smr_region *peer_smr, +static ssize_t smr_do_atomic_inject( + struct smr_ep *ep, struct smr_region *peer_smr, int64_t id, int64_t peer_id, uint32_t op, uint64_t op_flags, uint8_t datatype, uint8_t atomic_op, struct ofi_mr **desc, const struct iovec 
*iov, @@ -121,33 +123,27 @@ static ssize_t smr_do_atomic_inject(struct smr_ep *ep, struct smr_region *peer_s { struct smr_inject_buf *tx_buf; struct smr_tx_entry *pend; - struct smr_resp *resp; - tx_buf = smr_get_txbuf(peer_smr); - if (!tx_buf) - return -FI_EAGAIN; + tx_buf = smr_freestack_pop(smr_inject_pool(ep->region)); + assert(tx_buf); smr_generic_format(cmd, peer_id, op, 0, 0, op_flags); smr_generic_atomic_format(cmd, datatype, atomic_op); smr_format_inject_atomic(cmd, desc, iov, iov_count, resultv, result_count, comp_desc, compv, comp_count, - peer_smr, tx_buf); + ep->region, tx_buf); - if (smr_flags & SMR_RMA_REQ || op_flags & FI_DELIVERY_COMPLETE) { - if (ofi_cirque_isfull(smr_resp_queue(ep->region))) { - smr_release_txbuf(peer_smr, tx_buf); - return -FI_EAGAIN; - } - resp = ofi_cirque_next(smr_resp_queue(ep->region)); + if (op == ofi_op_atomic_fetch || op == ofi_op_atomic_compare || + atomic_op == FI_ATOMIC_READ || op_flags & FI_DELIVERY_COMPLETE) { pend = ofi_freestack_pop(ep->tx_fs); - smr_format_pend_resp(pend, cmd, context, res_desc, resultv, - result_count, op_flags, id, resp); - cmd->msg.hdr.data = smr_get_offset(ep->region, resp); - ofi_cirque_commit(smr_resp_queue(ep->region)); + assert(pend); + cmd->hdr.tx_ctx = (uintptr_t) pend; + smr_format_pend(pend, context, res_desc, resultv, + result_count, op_flags, id); + } else { + cmd->hdr.tx_ctx = 0; } - cmd->msg.hdr.op_flags |= smr_flags; - return FI_SUCCESS; } @@ -156,13 +152,14 @@ static int smr_select_atomic_proto(uint32_t op, uint64_t total_len, { if (op == ofi_op_atomic_compare || op == ofi_op_atomic_fetch || op_flags & FI_DELIVERY_COMPLETE || total_len > SMR_MSG_DATA_LEN) - return smr_src_inject; + return smr_proto_inject; - return smr_src_inline; + return smr_proto_inline; } -static ssize_t smr_generic_atomic(struct smr_ep *ep, - const struct fi_ioc *ioc, void **desc, size_t count, +static ssize_t smr_generic_atomic( + struct smr_ep *ep, const struct fi_ioc *ioc, + void **desc, size_t count, const struct fi_ioc *compare_ioc, void **compare_desc, size_t compare_count, struct fi_ioc *result_ioc, void **result_desc, size_t result_count, @@ -172,16 +169,16 @@ static ssize_t smr_generic_atomic(struct smr_ep *ep, uint64_t op_flags) { struct smr_cmd_entry *ce; + struct smr_cmd *cmd; struct smr_region *peer_smr; struct iovec iov[SMR_IOV_LIMIT]; struct iovec compare_iov[SMR_IOV_LIMIT]; struct iovec result_iov[SMR_IOV_LIMIT]; uint16_t smr_flags = 0; - int64_t id, peer_id; + int64_t id, peer_id, pos; int proto; ssize_t ret = 0; size_t total_len; - int64_t pos; assert(count <= SMR_IOV_LIMIT); assert(result_count <= SMR_IOV_LIMIT); @@ -192,8 +189,8 @@ static ssize_t smr_generic_atomic(struct smr_ep *ep, if (id < 0) return -FI_EAGAIN; - peer_id = smr_peer_data(ep->region)[id].addr.id; - peer_smr = smr_peer_region(ep->region, id); + peer_id = smr_peer_data(ep->region)[id].id; + peer_smr = smr_peer_region(ep, id); if (smr_peer_data(ep->region)[id].sar_status) return -FI_EAGAIN; @@ -216,12 +213,12 @@ static ssize_t smr_generic_atomic(struct smr_ep *ep, assert(result_ioc); ofi_ioc_to_iov(result_ioc, result_iov, result_count, ofi_datatype_size(datatype)); - smr_flags = SMR_RMA_REQ; /* fall through */ case ofi_op_atomic: if (atomic_op != FI_ATOMIC_READ) { assert(ioc); - ofi_ioc_to_iov(ioc, iov, count, ofi_datatype_size(datatype)); + ofi_ioc_to_iov(ioc, iov, count, + ofi_datatype_size(datatype)); } else { count = 0; } @@ -233,26 +230,37 @@ static ssize_t smr_generic_atomic(struct smr_ep *ep, proto = smr_select_atomic_proto(op, total_len, 
op_flags); - if (proto == smr_src_inline) { + if (proto == smr_proto_inline) { + cmd = &ce->cmd; + ce->ptr = smr_peer_to_peer(ep, id, (uintptr_t) cmd); smr_do_atomic_inline(ep, peer_smr, id, peer_id, ofi_op_atomic, op_flags, datatype, atomic_op, (struct ofi_mr **) desc, iov, count, - total_len, &ce->cmd); + total_len, cmd); } else { + if (smr_freestack_isempty(smr_cmd_stack(ep->region))) { + smr_cmd_queue_discard(ce, pos); + ret = -FI_EAGAIN; + goto unlock; + } + + cmd = smr_freestack_pop(smr_cmd_stack(ep->region)); + assert(cmd); + ce->ptr = smr_local_to_peer(ep, id, peer_id, (uintptr_t) cmd); ret = smr_do_atomic_inject(ep, peer_smr, id, peer_id, op, op_flags, datatype, atomic_op, (struct ofi_mr **) desc, iov, count, (struct ofi_mr **) result_desc, result_iov, result_count, (struct ofi_mr **) compare_desc, compare_iov, compare_count, total_len, context, - smr_flags, &ce->cmd); + smr_flags, cmd); if (ret) { smr_cmd_queue_discard(ce, pos); goto unlock; } } - if (!(smr_flags & SMR_RMA_REQ) && !(op_flags & FI_DELIVERY_COMPLETE)) { + if (!cmd->hdr.tx_ctx) { ret = smr_complete_tx(ep, context, op, op_flags); if (ret) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, @@ -260,7 +268,7 @@ static ssize_t smr_generic_atomic(struct smr_ep *ep, } } - smr_format_rma_ioc(&ce->rma_cmd, rma_ioc, rma_count); + smr_format_rma_ioc(cmd, rma_ioc, rma_count); smr_cmd_queue_commit(ce, pos); unlock: ofi_genlock_unlock(&ep->util_ep.lock); @@ -278,7 +286,8 @@ static ssize_t smr_atomic_writemsg(struct fid_ep *ep_fid, NULL, NULL, 0, NULL, NULL, 0, msg->addr, msg->rma_iov, msg->rma_iov_count, msg->datatype, msg->op, msg->context, - ofi_op_atomic, flags | ep->util_ep.tx_msg_flags); + ofi_op_atomic, + flags | ep->util_ep.tx_msg_flags); } static ssize_t smr_atomic_writev(struct fid_ep *ep_fid, @@ -297,13 +306,15 @@ static ssize_t smr_atomic_writev(struct fid_ep *ep_fid, return smr_generic_atomic(ep, iov, desc, count, NULL, NULL, 0, NULL, NULL, 0, dest_addr, &rma_iov, 1, datatype, - op, context, ofi_op_atomic, smr_ep_tx_flags(ep)); + op, context, ofi_op_atomic, + smr_ep_tx_flags(ep)); } -static ssize_t smr_atomic_write(struct fid_ep *ep_fid, const void *buf, size_t count, - void *desc, fi_addr_t dest_addr, uint64_t addr, - uint64_t key, enum fi_datatype datatype, enum fi_op op, - void *context) +static ssize_t smr_atomic_write(struct fid_ep *ep_fid, const void *buf, + size_t count, void *desc, fi_addr_t dest_addr, + uint64_t addr, uint64_t key, + enum fi_datatype datatype, enum fi_op op, + void *context) { struct smr_ep *ep; struct fi_ioc iov; @@ -318,9 +329,9 @@ static ssize_t smr_atomic_write(struct fid_ep *ep_fid, const void *buf, size_t c rma_iov.count = count; rma_iov.key = key; - return smr_generic_atomic(ep, &iov, &desc, 1, NULL, NULL, 0, NULL, NULL, 0, - dest_addr, &rma_iov, 1, datatype, op, context, - ofi_op_atomic, smr_ep_tx_flags(ep)); + return smr_generic_atomic(ep, &iov, &desc, 1, NULL, NULL, 0, NULL, NULL, + 0, dest_addr, &rma_iov, 1, datatype, op, + context, ofi_op_atomic, smr_ep_tx_flags(ep)); } static ssize_t smr_atomic_inject(struct fid_ep *ep_fid, const void *buf, @@ -328,14 +339,14 @@ static ssize_t smr_atomic_inject(struct fid_ep *ep_fid, const void *buf, uint64_t key, enum fi_datatype datatype, enum fi_op op) { struct smr_cmd_entry *ce; + struct smr_cmd *cmd; struct smr_ep *ep; struct smr_region *peer_smr; struct iovec iov; struct fi_rma_ioc rma_ioc; - int64_t id, peer_id; - ssize_t ret = 0; + int64_t id, peer_id, pos; + ssize_t ret = -FI_EAGAIN; size_t total_len; - int64_t pos; ep = container_of(ep_fid, struct 
smr_ep, util_ep.ep_fid.fid); @@ -343,17 +354,21 @@ static ssize_t smr_atomic_inject(struct fid_ep *ep_fid, const void *buf, if (id < 0) return -FI_EAGAIN; - peer_id = smr_peer_data(ep->region)[id].addr.id; - peer_smr = smr_peer_region(ep->region, id); + peer_id = smr_peer_data(ep->region)[id].id; + peer_smr = smr_peer_region(ep, id); + + ofi_genlock_lock(&ep->util_ep.lock); if (smr_peer_data(ep->region)[id].sar_status) { ret = -FI_EAGAIN; - goto out; + goto unlock; } ret = smr_cmd_queue_next(smr_cmd_queue(peer_smr), &ce, &pos); - if (ret == -FI_ENOENT) - return -FI_EAGAIN; + if (ret == -FI_ENOENT) { + ret = -FI_EAGAIN; + goto unlock; + } total_len = count * ofi_datatype_size(datatype); assert(total_len <= SMR_INJECT_SIZE); @@ -366,24 +381,36 @@ static ssize_t smr_atomic_inject(struct fid_ep *ep_fid, const void *buf, rma_ioc.key = key; if (total_len <= SMR_MSG_DATA_LEN) { + cmd = &ce->cmd; + ce->ptr = smr_peer_to_peer(ep, id, (uintptr_t) cmd); smr_do_atomic_inline(ep, peer_smr, id, peer_id, ofi_op_atomic, 0, datatype, op, NULL, &iov, 1, total_len, &ce->cmd); - } else if (total_len <= SMR_INJECT_SIZE) { + } else { + if (smr_freestack_isempty(smr_cmd_stack(ep->region))) { + smr_cmd_queue_discard(ce, pos); + ret = -FI_EAGAIN; + goto unlock; + } + + cmd = smr_freestack_pop(smr_cmd_stack(ep->region)); + assert(cmd); + ce->ptr = smr_local_to_peer(ep, id, peer_id, (uintptr_t) cmd); ret = smr_do_atomic_inject(ep, peer_smr, id, peer_id, ofi_op_atomic, 0, datatype, op, NULL, &iov, 1, NULL, NULL, 0, NULL, NULL, 0, total_len, NULL, - 0, &ce->cmd); + 0, cmd); if (ret) { smr_cmd_queue_discard(ce, pos); - goto out; + goto unlock; } } - smr_format_rma_ioc(&ce->rma_cmd, &rma_ioc, 1); + smr_format_rma_ioc(cmd, &rma_ioc, 1); smr_cmd_queue_commit(ce, pos); ofi_ep_peer_tx_cntr_inc(&ep->util_ep, ofi_op_atomic); -out: +unlock: + ofi_genlock_unlock(&ep->util_ep.lock); return ret; } diff --git a/prov/shm/src/smr_attr.c b/prov/shm/src/smr_attr.c index c1e987ec50d..368847539b2 100644 --- a/prov/shm/src/smr_attr.c +++ b/prov/shm/src/smr_attr.c @@ -101,7 +101,7 @@ struct fi_domain_attr smr_domain_attr = { .av_type = FI_AV_UNSPEC, .mr_mode = OFI_MR_BASIC | OFI_MR_SCALABLE, .mr_key_size = sizeof_field(struct fi_rma_iov, key), - .cq_data_size = sizeof_field(struct smr_msg_hdr, data), + .cq_data_size = sizeof_field(struct smr_cmd_hdr, cq_data), .cq_cnt = (1 << 10), .ep_cnt = SMR_MAX_PEERS, .tx_ctx_cnt = (1 << 10), @@ -121,7 +121,7 @@ struct fi_domain_attr smr_hmem_domain_attr = { .av_type = FI_AV_UNSPEC, .mr_mode = FI_MR_HMEM, .mr_key_size = sizeof_field(struct fi_rma_iov, key), - .cq_data_size = sizeof_field(struct smr_msg_hdr, data), + .cq_data_size = sizeof_field(struct smr_cmd_hdr, cq_data), .cq_cnt = (1 << 10), .ep_cnt = SMR_MAX_PEERS, .tx_ctx_cnt = (1 << 10), diff --git a/prov/shm/src/smr_av.c b/prov/shm/src/smr_av.c index 61e4344bde5..2d2fcb83d23 100644 --- a/prov/shm/src/smr_av.c +++ b/prov/shm/src/smr_av.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2020 Intel Corporation. All rights reserved. + * Copyright (c) Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -32,19 +32,13 @@ #include "smr.h" -static void smr_peer_addr_init(struct smr_addr *peer) -{ - memset(peer->name, 0, SMR_NAME_MAX); - peer->id = -1; -} - static int smr_name_compare(struct ofi_rbmap *map, void *key, void *data) { struct smr_map *smr_map; smr_map = container_of(map, struct smr_map, rbmap); - return strncmp(smr_map->peers[(uintptr_t) data].peer.name, + return strncmp(smr_map->peers[(uintptr_t) data].name, (char *) key, SMR_NAME_MAX); } @@ -54,7 +48,8 @@ static int smr_map_init(const struct fi_provider *prov, struct smr_map *map, int i; for (i = 0; i < peer_count; i++) { - smr_peer_addr_init(&map->peers[i].peer); + memset(&map->peers[i].name, 0, SMR_NAME_MAX); + map->peers[i].id_assigned = 0; map->peers[i].fiaddr = FI_ADDR_NOTAVAIL; } map->flags = flags; @@ -70,10 +65,8 @@ static void smr_map_cleanup(struct smr_map *map) int64_t i; for (i = 0; i < SMR_MAX_PEERS; i++) { - if (map->peers[i].peer.id < 0) - continue; - - smr_map_del(map, i); + if (map->peers[i].id_assigned) + smr_map_del(map, i); } ofi_rbmap_cleanup(&map->rbmap); } @@ -100,15 +93,13 @@ static int smr_av_close(struct fid *fid) static fi_addr_t smr_get_addr(struct fi_peer_rx_entry *rx_entry) { struct smr_cmd_ctx *cmd_ctx = rx_entry->peer_context; + struct smr_av *av; - return cmd_ctx->ep->region->map->peers[cmd_ctx->cmd.msg.hdr.id].fiaddr; -} + av = container_of(cmd_ctx->ep->util_ep.av, struct smr_av, util_av); + return av->smr_map.peers[cmd_ctx->cmd->hdr.id].fiaddr; +} -/* - * Input address: smr name (string) - * output address: index (fi_addr_t), the output from util_av - */ static int smr_av_insert(struct fid_av *av_fid, const void *addr, size_t count, fi_addr_t *fi_addr, uint64_t flags, void *context) { @@ -145,7 +136,8 @@ static int smr_av_insert(struct fid_av *av_fid, const void *addr, size_t count, ret = -FI_ENOMEM; } - FI_INFO(&smr_prov, FI_LOG_AV, "fi_addr: %" PRIu64 "\n", util_addr); + FI_INFO(&smr_prov, FI_LOG_AV, "fi_addr: %" PRIu64 "\n", + util_addr); if (ret) { if (fi_addr) @@ -174,7 +166,8 @@ static int smr_av_insert(struct fid_av *av_fid, const void *addr, size_t count, av_entry); smr_ep = container_of(util_ep, struct smr_ep, util_ep); smr_ep->region->max_sar_buf_per_peer = - SMR_MAX_PEERS / smr_av->smr_map.num_peers; + MIN(SMR_BUF_BATCH_MAX, + SMR_MAX_PEERS / smr_av->smr_map.num_peers); smr_ep->srx->owner_ops->foreach_unspec_addr(smr_ep->srx, &smr_get_addr); } @@ -184,8 +177,8 @@ static int smr_av_insert(struct fid_av *av_fid, const void *addr, size_t count, return succ_count; } -static int smr_av_remove(struct fid_av *av_fid, fi_addr_t *fi_addr, size_t count, - uint64_t flags) +static int smr_av_remove(struct fid_av *av_fid, fi_addr_t *fi_addr, + size_t count, uint64_t flags) { struct util_av *util_av; struct util_ep *util_ep; @@ -211,7 +204,8 @@ static int smr_av_remove(struct fid_av *av_fid, fi_addr_t *fi_addr, size_t count smr_map_del(&smr_av->smr_map, id); dlist_foreach(&util_av->ep_list, av_entry) { - util_ep = container_of(av_entry, struct util_ep, av_entry); + util_ep = container_of(av_entry, struct util_ep, + av_entry); smr_ep = container_of(util_ep, struct smr_ep, util_ep); if (smr_av->smr_map.num_peers > 0) smr_ep->region->max_sar_buf_per_peer = @@ -240,7 +234,7 @@ static int smr_av_lookup(struct fid_av *av, fi_addr_t fi_addr, void *addr, smr_av = container_of(util_av, struct smr_av, util_av); id = smr_addr_lookup(util_av, fi_addr); - name = smr_av->smr_map.peers[id].peer.name; + name = smr_av->smr_map.peers[id].name; 
strncpy((char *) addr, name, *addrlen); @@ -315,7 +309,8 @@ int smr_av_open(struct fid_domain *domain, struct fi_av_attr *attr, goto out; } - ret = ofi_av_init(util_domain, attr, &util_attr, &smr_av->util_av, context); + ret = ofi_av_init(util_domain, attr, &util_attr, &smr_av->util_av, + context); if (ret) goto out; diff --git a/prov/shm/src/smr_cntr.c b/prov/shm/src/smr_cntr.c index a499d0c2531..2d02e314dbd 100644 --- a/prov/shm/src/smr_cntr.c +++ b/prov/shm/src/smr_cntr.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Intel Corporation. All rights reserved. + * Copyright (c) Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/prov/shm/src/smr_comp.c b/prov/shm/src/smr_comp.c index b50e787fcd6..62d9a4d26ed 100644 --- a/prov/shm/src/smr_comp.c +++ b/prov/shm/src/smr_comp.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2018 Intel Corporation. All rights reserved + * Copyright (c) Intel Corporation. All rights reserved * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -63,6 +63,8 @@ int smr_complete_rx(struct smr_ep *ep, void *context, uint32_t op, uint64_t flags, size_t len, void *buf, int64_t id, uint64_t tag, uint64_t data) { + struct smr_av *av; + ofi_ep_peer_rx_cntr_inc(&ep->util_ep, op); if (!(flags & (FI_REMOTE_CQ_DATA | FI_COMPLETION))) @@ -70,6 +72,7 @@ int smr_complete_rx(struct smr_ep *ep, void *context, uint32_t op, flags &= ~FI_COMPLETION; + av = container_of(ep->util_ep.av, struct smr_av, util_av); return ofi_peer_cq_write(ep->util_ep.rx_cq, context, flags, len, buf, - data, tag, ep->region->map->peers[id].fiaddr); + data, tag, av->smr_map.peers[id].fiaddr); } \ No newline at end of file diff --git a/prov/shm/src/smr_cq.c b/prov/shm/src/smr_cq.c index 0d345bfa312..084dda8fa7a 100644 --- a/prov/shm/src/smr_cq.c +++ b/prov/shm/src/smr_cq.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2017 Intel Corporation. All rights reserved. + * Copyright (c) Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/prov/shm/src/smr_domain.c b/prov/shm/src/smr_domain.c index a5ff16c401d..72a3be5c40a 100644 --- a/prov/shm/src/smr_domain.c +++ b/prov/shm/src/smr_domain.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Intel Corporation, Inc. All rights reserved. + * Copyright (c) Intel Corporation, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -123,7 +123,8 @@ static int smr_domain_close(fid_t fid) int ret; struct smr_domain *domain; - domain = container_of(fid, struct smr_domain, util_domain.domain_fid.fid); + domain = container_of(fid, struct smr_domain, + util_domain.domain_fid.fid); if (domain->ipc_cache) ofi_ipc_cache_destroy(domain->ipc_cache); @@ -173,13 +174,15 @@ int smr_domain_open(struct fid_fabric *fabric, struct fi_info *info, return ret; } - smr_fabric = container_of(fabric, struct smr_fabric, util_fabric.fabric_fid); + smr_fabric = container_of(fabric, struct smr_fabric, + util_fabric.fabric_fid); ofi_mutex_lock(&smr_fabric->util_fabric.lock); smr_domain->fast_rma = smr_fast_rma_enabled(info->domain_attr->mr_mode, info->tx_attr->msg_order); ofi_mutex_unlock(&smr_fabric->util_fabric.lock); - ret = ofi_ipc_cache_open(&smr_domain->ipc_cache, &smr_domain->util_domain); + ret = ofi_ipc_cache_open(&smr_domain->ipc_cache, + &smr_domain->util_domain); if (ret) { free(smr_domain); return ret; diff --git a/prov/shm/src/smr_dsa.c b/prov/shm/src/smr_dsa.c index 328f7117b5c..e6bc17b0cee 100644 --- a/prov/shm/src/smr_dsa.c +++ b/prov/shm/src/smr_dsa.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Intel Corporation. All rights reserved + * Copyright (c) Intel Corporation. All rights reserved * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -520,11 +520,11 @@ static void dsa_prepare_copy_desc(struct dsa_hw_desc *desc, } static void smr_dsa_copy_sar(struct smr_freestack *sar_pool, - struct smr_dsa_context *dsa_context, - struct dsa_cmd_context *dsa_cmd_context, - struct smr_resp *resp, struct smr_cmd *cmd, - const struct iovec *iov, size_t count, - size_t *bytes_done, struct smr_region *region) + struct smr_dsa_context *dsa_context, + struct dsa_cmd_context *dsa_cmd_context, + struct smr_cmd *cmd, const struct iovec *iov, + size_t count, size_t *bytes_done, + struct smr_region *region) { struct smr_sar_buf *smr_sar_buf; size_t remaining_sar_size; @@ -596,8 +596,6 @@ static void smr_dsa_copy_sar(struct smr_freestack *sar_pool, } assert(dsa_bytes_pending > 0); - resp->status = SMR_STATUS_BUSY; - dsa_cmd_context->bytes_in_progress = dsa_bytes_pending; dsa_context->copy_type_stats[dsa_cmd_context->dir]++; dsa_cmd_context->op = cmd->msg.hdr.op; @@ -641,17 +639,16 @@ dsa_process_partially_completed_desc(struct smr_dsa_context *dsa_context, static void dsa_update_tx_entry(struct smr_region *smr, struct dsa_cmd_context *dsa_cmd_context) { - struct smr_resp *resp; struct smr_cmd *cmd; struct smr_tx_entry *tx_entry = dsa_cmd_context->entry_ptr; tx_entry->bytes_done += dsa_cmd_context->bytes_in_progress; cmd = &tx_entry->cmd; - resp = smr_get_ptr(smr, cmd->msg.hdr.src_data); + //resp = smr_get_ptr(smr, cmd->msg.hdr.src_data); - assert(resp->status == SMR_STATUS_BUSY); - resp->status = (dsa_cmd_context->dir == OFI_COPY_IOV_TO_BUF ? - SMR_STATUS_SAR_FULL : SMR_STATUS_SAR_EMPTY); + // assert(resp->status == SMR_STATUS_BUSY); + // resp->status = (dsa_cmd_context->dir == OFI_COPY_IOV_TO_BUF ? 
+ // SMR_STATUS_SAR_FULL : SMR_STATUS_SAR_EMPTY); } static void dsa_update_sar_entry(struct smr_region *smr, @@ -659,17 +656,16 @@ static void dsa_update_sar_entry(struct smr_region *smr, { struct smr_pend_entry *sar_entry = dsa_cmd_context->entry_ptr; struct smr_region *peer_smr; - struct smr_resp *resp; struct smr_cmd *cmd; sar_entry->bytes_done += dsa_cmd_context->bytes_in_progress; cmd = &sar_entry->cmd; peer_smr = smr_peer_region(smr, cmd->msg.hdr.id); - resp = smr_get_ptr(peer_smr, cmd->msg.hdr.src_data); + // resp = smr_get_ptr(peer_smr, cmd->msg.hdr.src_data); - assert(resp->status == SMR_STATUS_BUSY); - resp->status = (dsa_cmd_context->dir == OFI_COPY_IOV_TO_BUF ? - SMR_STATUS_SAR_FULL : SMR_STATUS_SAR_EMPTY); + // assert(resp->status == SMR_STATUS_BUSY); + // resp->status = (dsa_cmd_context->dir == OFI_COPY_IOV_TO_BUF ? + // SMR_STATUS_SAR_FULL : SMR_STATUS_SAR_EMPTY); } static void dsa_process_complete_work(struct smr_region *smr, @@ -995,55 +991,47 @@ void smr_dsa_progress(struct smr_ep *ep) if (dsa_cmd_completed) dsa_process_complete_work(ep->region, dsa_cmd_context, - dsa_context); + dsa_context); } pthread_spin_unlock(&ep->region->lock); } size_t smr_dsa_copy_to_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, - struct smr_resp *resp, struct smr_cmd *cmd, - const struct iovec *iov, size_t count, size_t *bytes_done, - void *entry_ptr) + struct smr_cmd *cmd, const struct iovec *iov, size_t count, + size_t *bytes_done) { struct dsa_cmd_context *dsa_cmd_context; assert(smr_env.use_dsa_sar); - if (resp->status != SMR_STATUS_SAR_EMPTY) - return -FI_EAGAIN; - dsa_cmd_context = dsa_allocate_cmd_context(ep->dsa_context); if (!dsa_cmd_context) return -FI_ENOMEM; dsa_cmd_context->dir = OFI_COPY_IOV_TO_BUF; dsa_cmd_context->entry_ptr = entry_ptr; - smr_dsa_copy_sar(sar_pool, ep->dsa_context, dsa_cmd_context, resp, - cmd, iov, count, bytes_done, ep->region); + smr_dsa_copy_sar(sar_pool, ep->dsa_context, dsa_cmd_context, cmd, iov, + count, bytes_done, ep->region); return FI_SUCCESS; } size_t smr_dsa_copy_from_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, - struct smr_resp *resp, struct smr_cmd *cmd, - const struct iovec *iov, size_t count, size_t *bytes_done, - void *entry_ptr) + struct smr_cmd *cmd, const struct iovec *iov, size_t count, + size_t *bytes_done) { struct dsa_cmd_context *dsa_cmd_context; assert(smr_env.use_dsa_sar); - if (resp->status != SMR_STATUS_SAR_FULL) - return FI_EAGAIN; - dsa_cmd_context = dsa_allocate_cmd_context(ep->dsa_context); if (!dsa_cmd_context) return -FI_ENOMEM; dsa_cmd_context->dir = OFI_COPY_BUF_TO_IOV; dsa_cmd_context->entry_ptr = entry_ptr; - smr_dsa_copy_sar(sar_pool, ep->dsa_context, dsa_cmd_context, resp, - cmd, iov, count, bytes_done, ep->region); + smr_dsa_copy_sar(sar_pool, ep->dsa_context, dsa_cmd_context, cmd, iov, + count, bytes_done, ep->region); return FI_SUCCESS; } @@ -1054,17 +1042,15 @@ void smr_dsa_init(void) {} void smr_dsa_cleanup(void) {} size_t smr_dsa_copy_to_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, - struct smr_resp *resp, struct smr_cmd *cmd, - const struct iovec *iov, size_t count, size_t *bytes_done, - void *entry_ptr) + struct smr_cmd *cmd, const struct iovec *iov, size_t count, + size_t *bytes_done) { return -FI_ENOSYS; } size_t smr_dsa_copy_from_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, - struct smr_resp *resp, struct smr_cmd *cmd, - const struct iovec *iov, size_t count, size_t *bytes_done, - void *entry_ptr) + struct smr_cmd *cmd, const struct iovec *iov, size_t count, + size_t 
*bytes_done) { return -FI_ENOSYS; } diff --git a/prov/shm/src/smr_ep.c b/prov/shm/src/smr_ep.c index dd07a452429..500c53bd615 100644 --- a/prov/shm/src/smr_ep.c +++ b/prov/shm/src/smr_ep.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2021 Intel Corporation. All rights reserved + * Copyright (c) Intel Corporation. All rights reserved * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -37,8 +37,6 @@ extern struct fi_ops_msg smr_msg_ops, smr_no_recv_msg_ops; extern struct fi_ops_tagged smr_tag_ops, smr_no_recv_tag_ops; extern struct fi_ops_rma smr_rma_ops; extern struct fi_ops_atomic smr_atomic_ops; -DEFINE_LIST(sock_name_list); -pthread_mutex_t sock_list_lock = PTHREAD_MUTEX_INITIALIZER; int smr_global_ep_idx = 0; int smr_setname(fid_t fid, void *addr, size_t addrlen) @@ -129,8 +127,9 @@ int smr_ep_setopt(fid_t fid, int level, int optname, const void *optval, if (optname == FI_OPT_CUDA_API_PERMITTED) { if (!hmem_ops[FI_HMEM_CUDA].initialized) { FI_WARN(&smr_prov, FI_LOG_CORE, - "Cannot set option FI_OPT_CUDA_API_PERMITTED when cuda library " - "or cuda device not available\n"); + "Cannot set option FI_OPT_CUDA_API_PERMITTED " + "when cuda library or cuda device " + "not available\n"); return -FI_EINVAL; } @@ -164,11 +163,10 @@ static void smr_send_name(struct smr_ep *ep, int64_t id) { struct smr_region *peer_smr; struct smr_cmd_entry *ce; - struct smr_inject_buf *tx_buf; int64_t pos; int ret; - peer_smr = smr_peer_region(ep->region, id); + peer_smr = smr_peer_region(ep, id); if (smr_peer_data(ep->region)[id].name_sent) return; @@ -177,20 +175,14 @@ static void smr_send_name(struct smr_ep *ep, int64_t id) if (ret == -FI_ENOENT) return; - tx_buf = smr_get_txbuf(peer_smr); - if (!tx_buf) { - smr_cmd_queue_discard(ce, pos); - return; - } + ce->ptr = smr_peer_to_peer(ep, id, (uintptr_t) &ce->cmd); + ce->cmd.hdr.op = SMR_OP_MAX + ofi_ctrl_connreq; + ce->cmd.hdr.id = id; + ce->cmd.hdr.cq_data = ep->region->pid; - ce->cmd.msg.hdr.op = SMR_OP_MAX + ofi_ctrl_connreq; - ce->cmd.msg.hdr.id = id; - ce->cmd.msg.hdr.data = ep->region->pid; - ce->cmd.msg.hdr.src_data = smr_get_offset(peer_smr, tx_buf); - - ce->cmd.msg.hdr.size = strlen(ep->name) + 1; - memcpy(tx_buf->data, ep->name, ce->cmd.msg.hdr.size); + ce->cmd.hdr.size = strlen(ep->name) + 1; + memcpy(ce->cmd.data.msg, ep->name, ce->cmd.hdr.size); smr_peer_data(ep->region)[id].name_sent = 1; smr_cmd_queue_commit(ce, pos); @@ -198,21 +190,24 @@ static void smr_send_name(struct smr_ep *ep, int64_t id) int64_t smr_verify_peer(struct smr_ep *ep, fi_addr_t fi_addr) { + struct smr_av *av; int64_t id; int ret; + av = container_of(ep->util_ep.av, struct smr_av, util_av); + id = smr_addr_lookup(ep->util_ep.av, fi_addr); assert(id < SMR_MAX_PEERS); if (id < 0) return -1; - if (smr_peer_data(ep->region)[id].addr.id >= 0) + if (smr_peer_data(ep->region)[id].id >= 0) return id; - if (!ep->region->map->peers[id].region) { - ofi_spin_lock(&ep->region->map->lock); - ret = smr_map_to_region(&smr_prov, ep->region->map, id); - ofi_spin_unlock(&ep->region->map->lock); + if (!av->smr_map.peers[id].region) { + ofi_spin_lock(&av->smr_map.lock); + ret = smr_map_to_region(&smr_prov, &av->smr_map, id); + ofi_spin_unlock(&av->smr_map.lock); if (ret) return -1; } @@ -222,201 +217,116 @@ int64_t smr_verify_peer(struct smr_ep *ep, fi_addr_t fi_addr) return -1; } -void smr_format_pend_resp(struct smr_tx_entry *pend, struct smr_cmd *cmd, - void *context, struct ofi_mr **mr, - const struct iovec *iov, 
uint32_t iov_count, - uint64_t op_flags, int64_t id, struct smr_resp *resp) +void smr_format_pend(struct smr_tx_entry *pend, void *context, + struct ofi_mr **mr, const struct iovec *iov, + uint32_t iov_count, uint64_t op_flags, int64_t id) { - pend->cmd = *cmd; pend->context = context; memcpy(pend->iov, iov, sizeof(*iov) * iov_count); pend->iov_count = iov_count; pend->peer_id = id; pend->op_flags = op_flags; - if (cmd->msg.hdr.op_src != smr_src_sar) { - pend->bytes_done = 0; - resp->status = FI_EBUSY; - } + pend->bytes_done = 0; if (mr) memcpy(pend->mr, mr, sizeof(*mr) * iov_count); else memset(pend->mr, 0, sizeof(*mr) * iov_count); - resp->msg_id = (uint64_t) (uintptr_t) pend; } void smr_generic_format(struct smr_cmd *cmd, int64_t peer_id, uint32_t op, uint64_t tag, uint64_t data, uint64_t op_flags) { - cmd->msg.hdr.op = op; - cmd->msg.hdr.op_flags = op == ofi_op_read_req ? SMR_RMA_REQ : 0; - cmd->msg.hdr.tag = tag; - cmd->msg.hdr.id = peer_id; - cmd->msg.hdr.data = data; + cmd->hdr.op = op; + cmd->hdr.status = 0; + cmd->hdr.op_flags = 0; + cmd->hdr.tag = tag; + cmd->hdr.id = peer_id; + cmd->hdr.cq_data = data; + cmd->hdr.rx_ctx = 0; if (op_flags & FI_REMOTE_CQ_DATA) - cmd->msg.hdr.op_flags |= SMR_REMOTE_CQ_DATA; - if (op_flags & FI_COMPLETION) - cmd->msg.hdr.op_flags |= SMR_TX_COMPLETION; + cmd->hdr.op_flags |= SMR_REMOTE_CQ_DATA; } static void smr_format_inline(struct smr_cmd *cmd, struct ofi_mr **mr, const struct iovec *iov, size_t count) { - cmd->msg.hdr.op_src = smr_src_inline; - cmd->msg.hdr.size = ofi_copy_from_mr_iov(cmd->msg.data.msg, - SMR_MSG_DATA_LEN, mr, - iov, count, 0); + cmd->hdr.proto = smr_proto_inline; + cmd->hdr.size = ofi_copy_from_mr_iov(cmd->data.msg, SMR_MSG_DATA_LEN, + mr, iov, count, 0); } static void smr_format_inject(struct smr_cmd *cmd, struct ofi_mr **mr, const struct iovec *iov, size_t count, struct smr_region *smr, struct smr_inject_buf *tx_buf) { - cmd->msg.hdr.op_src = smr_src_inject; - cmd->msg.hdr.src_data = smr_get_offset(smr, tx_buf); - cmd->msg.hdr.size = ofi_copy_from_mr_iov(tx_buf->data, SMR_INJECT_SIZE, - mr, iov, count, 0); + cmd->hdr.proto = smr_proto_inject; + cmd->hdr.proto_data = smr_get_offset(smr, tx_buf); + if (cmd->hdr.op != ofi_op_read_req) + cmd->hdr.size = ofi_copy_from_mr_iov(tx_buf->data, + SMR_INJECT_SIZE, + mr, iov, count, 0); + else + cmd->hdr.size = ofi_total_iov_len(iov, count); } static void smr_format_iov(struct smr_cmd *cmd, const struct iovec *iov, - size_t count, size_t total_len, struct smr_region *smr, - struct smr_resp *resp) + size_t count, size_t total_len, struct smr_region *smr) { - cmd->msg.hdr.op_src = smr_src_iov; - cmd->msg.hdr.src_data = smr_get_offset(smr, resp); - cmd->msg.data.iov_count = count; - cmd->msg.hdr.size = total_len; - memcpy(cmd->msg.data.iov, iov, sizeof(*iov) * count); + cmd->hdr.proto = smr_proto_iov; + cmd->data.iov_count = count; + cmd->hdr.size = total_len; + memcpy(cmd->data.iov, iov, sizeof(*iov) * count); } static int smr_format_ipc(struct smr_cmd *cmd, void *ptr, size_t len, - struct smr_region *smr, struct smr_resp *resp, + struct smr_region *smr, enum fi_hmem_iface iface, uint64_t device) { int ret; void *base; - cmd->msg.hdr.op_src = smr_src_ipc; - cmd->msg.hdr.src_data = smr_get_offset(smr, resp); - cmd->msg.hdr.size = len; - cmd->msg.data.ipc_info.iface = iface; - cmd->msg.data.ipc_info.device = device; + cmd->hdr.proto = smr_proto_ipc; + cmd->hdr.size = len; + cmd->data.ipc_info.iface = iface; + cmd->data.ipc_info.device = device; - ret = 
ofi_hmem_get_base_addr(cmd->msg.data.ipc_info.iface, ptr, + ret = ofi_hmem_get_base_addr(cmd->data.ipc_info.iface, ptr, len, &base, - &cmd->msg.data.ipc_info.base_length); + &cmd->data.ipc_info.base_length); if (ret) return ret; - ret = ofi_hmem_get_handle(cmd->msg.data.ipc_info.iface, base, - cmd->msg.data.ipc_info.base_length, - (void **)&cmd->msg.data.ipc_info.ipc_handle); + ret = ofi_hmem_get_handle(cmd->data.ipc_info.iface, base, + cmd->data.ipc_info.base_length, + (void **)&cmd->data.ipc_info.ipc_handle); if (ret) return ret; - cmd->msg.data.ipc_info.base_addr = (uintptr_t) base; - cmd->msg.data.ipc_info.offset = (uintptr_t) ptr - (uintptr_t) base; + cmd->data.ipc_info.base_addr = (uintptr_t) base; + cmd->data.ipc_info.offset = (uintptr_t) ptr - (uintptr_t) base; return FI_SUCCESS; } -static int smr_format_mmap(struct smr_ep *ep, struct smr_cmd *cmd, - const struct iovec *iov, size_t count, size_t total_len, - struct smr_tx_entry *pend, struct smr_resp *resp) -{ - void *mapped_ptr; - int fd, ret, num; - uint64_t msg_id; - struct smr_ep_name *map_name; - - msg_id = ep->msg_id++; - map_name = calloc(1, sizeof(*map_name)); - if (!map_name) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "calloc error\n"); - return -FI_ENOMEM; - } - - pthread_mutex_lock(&ep_list_lock); - dlist_insert_tail(&map_name->entry, &ep_name_list); - pthread_mutex_unlock(&ep_list_lock); - num = smr_mmap_name(map_name->name, ep->name, msg_id); - if (num < 0) { - FI_WARN(&smr_prov, FI_LOG_AV, "generating shm file name failed\n"); - ret = -errno; - goto remove_entry; - } - - fd = shm_open(map_name->name, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR); - if (fd < 0) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "shm_open error\n"); - ret = -errno; - goto remove_entry; - } - - ret = ftruncate(fd, total_len); - if (ret < 0) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "ftruncate error\n"); - goto unlink_close; - } - - mapped_ptr = mmap(NULL, total_len, PROT_READ | PROT_WRITE, - MAP_SHARED, fd, 0); - if (mapped_ptr == MAP_FAILED) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "mmap error\n"); - ret = -errno; - goto unlink_close; - } - - if (cmd->msg.hdr.op != ofi_op_read_req) { - if (ofi_copy_from_iov(mapped_ptr, total_len, iov, count, 0) - != total_len) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "copy from iov error\n"); - ret = -FI_EIO; - goto munmap; - } - munmap(mapped_ptr, total_len); - } else { - pend->map_ptr = mapped_ptr; - } - - cmd->msg.hdr.op_src = smr_src_mmap; - cmd->msg.hdr.msg_id = msg_id; - cmd->msg.hdr.src_data = smr_get_offset(ep->region, resp); - cmd->msg.hdr.size = total_len; - pend->map_name = map_name; - - close(fd); - return 0; - -munmap: - munmap(mapped_ptr, total_len); -unlink_close: - shm_unlink(map_name->name); - close(fd); -remove_entry: - dlist_remove(&map_name->entry); - free(map_name); - return ret; -} - -size_t smr_copy_to_sar(struct smr_freestack *sar_pool, struct smr_resp *resp, +size_t smr_copy_to_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, struct smr_cmd *cmd, struct ofi_mr **mr, const struct iovec *iov, size_t count, size_t *bytes_done) { struct smr_sar_buf *sar_buf; - size_t start = *bytes_done; int next_sar_buf = 0; - if (resp->status != SMR_STATUS_SAR_EMPTY) - return 0; + if (smr_env.use_dsa_sar && ofi_mr_all_host(mr, count)) + return smr_dsa_copy_to_sar(ep, sar_pool, cmd, iov, count, + bytes_done); - while ((*bytes_done < cmd->msg.hdr.size) && - (next_sar_buf < cmd->msg.data.buf_batch_size)) { + while ((*bytes_done < cmd->hdr.size) && + (next_sar_buf < cmd->data.buf_batch_size)) { sar_buf = 
smr_freestack_get_entry_from_index(
-				sar_pool, cmd->msg.data.sar[next_sar_buf]);
+				sar_pool, cmd->data.sar[next_sar_buf]);
 
 		*bytes_done += ofi_copy_from_mr_iov(
 				sar_buf->buf, SMR_SAR_SIZE, mr, iov, count,
@@ -425,29 +335,26 @@ size_t smr_copy_to_sar(struct smr_freestack *sar_pool, struct smr_resp *resp,
 		next_sar_buf++;
 	}
 
-	ofi_wmb();
-
-	resp->status = SMR_STATUS_SAR_FULL;
-
-	return *bytes_done - start;
+	return FI_SUCCESS;
 }
 
-size_t smr_copy_from_sar(struct smr_freestack *sar_pool, struct smr_resp *resp,
+size_t smr_copy_from_sar(struct smr_ep *ep, struct smr_freestack *sar_pool,
 		struct smr_cmd *cmd, struct ofi_mr **mr,
 		const struct iovec *iov, size_t count,
 		size_t *bytes_done)
 {
 	struct smr_sar_buf *sar_buf;
-	size_t start = *bytes_done;
 	int next_sar_buf = 0;
 
-	if (resp->status != SMR_STATUS_SAR_FULL)
-		return 0;
+	/* All-host buffers can be handed to the DSA engine */
+	if (smr_env.use_dsa_sar && ofi_mr_all_host(mr, count))
+		return smr_dsa_copy_from_sar(ep, sar_pool, cmd, iov, count,
+					     bytes_done);
 
-	while ((*bytes_done < cmd->msg.hdr.size) &&
-	       (next_sar_buf < cmd->msg.data.buf_batch_size)) {
+	while ((*bytes_done < cmd->hdr.size) &&
+	       (next_sar_buf < cmd->data.buf_batch_size)) {
 		sar_buf = smr_freestack_get_entry_from_index(
-				sar_pool, cmd->msg.data.sar[next_sar_buf]);
+				sar_pool, cmd->data.sar[next_sar_buf]);
 
 		*bytes_done += ofi_copy_to_mr_iov(mr, iov, count, *bytes_done,
 				sar_buf->buf, SMR_SAR_SIZE);
@@ -455,79 +362,54 @@ size_t smr_copy_from_sar(struct smr_freestack *sar_pool, struct smr_resp *resp,
 		next_sar_buf++;
 	}
 
-	ofi_wmb();
-
-	resp->status = SMR_STATUS_SAR_EMPTY;
-	return *bytes_done - start;
+	return FI_SUCCESS;
 }
 
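A worked example of the batching done by smr_format_sar() below (illustrative numbers, not from the patch): with 32 KiB SAR buffers, a 100 KiB payload requests (total_len + SMR_SAR_SIZE - 1) / SMR_SAR_SIZE = 4 buffers; the two MIN() clamps then cap that at max_sar_buf_per_peer and at whatever smr_freestack_avail() reports. A transfer may therefore start with fewer buffers than it needs and stream the remainder through the same buffers as the peer drains them and returns the command.

 static int smr_format_sar(struct smr_ep *ep, struct smr_cmd *cmd,
-		struct ofi_mr **mr, const struct iovec *iov, size_t count,
-		size_t total_len, struct smr_region *smr,
-		struct smr_region *peer_smr, int64_t id,
-		struct smr_tx_entry *pending, struct smr_resp *resp)
+		struct ofi_mr **mr, const struct iovec *iov,
+		size_t count, size_t total_len,
+		struct smr_region *smr, struct smr_region *peer_smr,
+		int64_t id, struct smr_tx_entry *pending)
 {
 	int i, ret;
-	uint32_t sar_needed;
 
-	if (peer_smr->max_sar_buf_per_peer == 0)
+	if (ep->region->max_sar_buf_per_peer == 0 ||
+	    smr_peer_data(ep->region)[id].sar_status)
 		return -FI_EAGAIN;
 
-	if (smr_peer_data(ep->region)[id].sar_status) {
-		return -FI_EAGAIN;
-	}
+	cmd->data.buf_batch_size = MIN(
+			ep->region->max_sar_buf_per_peer,
+			(total_len + SMR_SAR_SIZE - 1) / SMR_SAR_SIZE);
+	cmd->data.buf_batch_size = MIN(
+			cmd->data.buf_batch_size,
+			smr_freestack_avail(smr_sar_pool(ep->region)));
 
-	sar_needed = (total_len + SMR_SAR_SIZE - 1) / SMR_SAR_SIZE;
-	cmd->msg.data.buf_batch_size = MIN(SMR_BUF_BATCH_MAX,
-			MIN(peer_smr->max_sar_buf_per_peer, sar_needed));
-
-	pthread_spin_lock(&peer_smr->lock);
-	for (i = 0; i < cmd->msg.data.buf_batch_size; i++) {
-		if (smr_freestack_isempty(smr_sar_pool(peer_smr))) {
-			cmd->msg.data.buf_batch_size = i;
-			if (i == 0) {
-				pthread_spin_unlock(&peer_smr->lock);
-				return -FI_EAGAIN;
-			}
-			break;
-		}
-
-		cmd->msg.data.sar[i] =
-			smr_freestack_pop_by_index(smr_sar_pool(peer_smr));
+	for (i = 0; i < cmd->data.buf_batch_size; i++) {
+		cmd->data.sar[i] =
+			smr_freestack_pop_by_index(smr_sar_pool(ep->region));
 	}
-	pthread_spin_unlock(&peer_smr->lock);
 
-	resp->status = SMR_STATUS_SAR_EMPTY;
-	cmd->msg.hdr.op_src = smr_src_sar;
-	cmd->msg.hdr.src_data = smr_get_offset(smr, resp);
-	cmd->msg.hdr.size = total_len;
-	pending->bytes_done = 0;
+	cmd->hdr.proto = smr_proto_sar;
+	cmd->hdr.size = total_len;
 
 	/* Nothing to copy 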
for 0 byte transfer */ - if (!cmd->msg.hdr.size) + if (!cmd->hdr.size) goto out; - if (cmd->msg.hdr.op != ofi_op_read_req) { - if (smr_env.use_dsa_sar && ofi_mr_all_host(mr, count)) { - ret = smr_dsa_copy_to_sar(ep, smr_sar_pool(peer_smr), - resp, cmd, iov, count, - &pending->bytes_done, pending); - if (ret != FI_SUCCESS) { - for (i = cmd->msg.data.buf_batch_size - 1; - i >= 0; i--) { - smr_freestack_push_by_index( - smr_sar_pool(peer_smr), - cmd->msg.data.sar[i]); - } - return -FI_EAGAIN; + if (cmd->hdr.op != ofi_op_read_req) { + ret = smr_copy_to_sar(ep, smr_sar_pool(ep->region), cmd, + mr, iov, count, &pending->bytes_done); + if (ret < 0) { + for (i = cmd->data.buf_batch_size - 1; i >= 0; i--) { + smr_freestack_push_by_index( + smr_sar_pool(ep->region), + cmd->data.sar[i]); } - } else { - smr_copy_to_sar(smr_sar_pool(peer_smr), resp, cmd, - mr, iov, count, &pending->bytes_done); + return -FI_EAGAIN; } } out: - smr_peer_data(smr)[id].sar_status = SMR_STATUS_SAR_FULL; + smr_peer_data(smr)[id].sar_status = FI_EBUSY; return FI_SUCCESS; } @@ -555,227 +437,181 @@ int smr_select_proto(void **desc, size_t iov_count, bool vma_avail, } if (op == ofi_op_read_req) { + if (total_len <= SMR_INJECT_SIZE) + return smr_proto_inject; if (use_ipc) - return smr_src_ipc; + return smr_proto_ipc; if (vma_avail && FI_HMEM_SYSTEM == iface) - return smr_src_iov; - return smr_src_sar; + return smr_proto_iov; + return smr_proto_sar; } if (fastcopy_avail && total_len <= smr_env.max_gdrcopy_size) - return total_len <= SMR_MSG_DATA_LEN ? smr_src_inline : - smr_src_inject; + return total_len <= SMR_MSG_DATA_LEN ? smr_proto_inline : + smr_proto_inject; if (op_flags & FI_INJECT) { + assert(total_len <= SMR_INJECT_SIZE); if (op_flags & FI_DELIVERY_COMPLETE) - return smr_src_sar; + return smr_proto_sar; return total_len <= SMR_MSG_DATA_LEN ? - smr_src_inline : smr_src_inject; + smr_proto_inline : smr_proto_inject; } if (use_ipc) - return smr_src_ipc; + return smr_proto_ipc; - if (total_len > SMR_INJECT_SIZE && vma_avail) - return smr_src_iov; + if (total_len > SMR_INJECT_SIZE) + return vma_avail ? 
smr_proto_iov : smr_proto_sar;
 
 	if (op_flags & FI_DELIVERY_COMPLETE)
-		return smr_src_sar;
+		return smr_proto_sar;
 
 	if (total_len <= SMR_MSG_DATA_LEN)
-		return smr_src_inline;
+		return smr_proto_inline;
 
 	if (total_len <= SMR_INJECT_SIZE)
-		return smr_src_inject;
+		return smr_proto_inject;
 
-	if (total_len <= smr_env.sar_threshold)
-		return smr_src_sar;
+	return smr_proto_sar;
 
-	return smr_src_mmap;
 }
 
-static ssize_t smr_do_inline(struct smr_ep *ep, struct smr_region *peer_smr, int64_t id,
-		int64_t peer_id, uint32_t op, uint64_t tag, uint64_t data,
-		uint64_t op_flags, struct ofi_mr **desc,
-		const struct iovec *iov, size_t iov_count, size_t total_len,
-		void *context, struct smr_cmd *cmd)
+static ssize_t smr_do_inline(struct smr_ep *ep, struct smr_region *peer_smr,
+			     int64_t id, int64_t peer_id, uint32_t op,
+			     uint64_t tag, uint64_t data, uint64_t op_flags,
+			     struct ofi_mr **desc, const struct iovec *iov,
+			     size_t iov_count, size_t total_len, void *context,
+			     struct smr_cmd *cmd)
 {
+	cmd->hdr.tx_ctx = 0;
 	smr_generic_format(cmd, peer_id, op, tag, data, op_flags);
 	smr_format_inline(cmd, desc, iov, iov_count);
 
 	return FI_SUCCESS;
 }
 
-static ssize_t smr_do_inject(struct smr_ep *ep, struct smr_region *peer_smr, int64_t id,
-		int64_t peer_id, uint32_t op, uint64_t tag, uint64_t data,
-		uint64_t op_flags, struct ofi_mr **desc,
-		const struct iovec *iov, size_t iov_count, size_t total_len,
-		void *context, struct smr_cmd *cmd)
+static ssize_t smr_do_inject(struct smr_ep *ep, struct smr_region *peer_smr,
+			     int64_t id, int64_t peer_id, uint32_t op,
+			     uint64_t tag, uint64_t data, uint64_t op_flags,
+			     struct ofi_mr **desc, const struct iovec *iov,
+			     size_t iov_count, size_t total_len, void *context,
+			     struct smr_cmd *cmd)
 {
 	struct smr_inject_buf *tx_buf;
+	struct smr_tx_entry *pend;
 
-	tx_buf = smr_get_txbuf(peer_smr);
-	if (!tx_buf)
-		return -FI_EAGAIN;
+	/* The inject pool is sized alongside the cmd stack, so this
+	 * pop is expected to succeed whenever a command was available.
+	 */
+	tx_buf = smr_freestack_pop(smr_inject_pool(ep->region));
+	assert(tx_buf);
+
+	if (op == ofi_op_read_req) {
+		pend = ofi_freestack_pop(ep->tx_fs);
+		assert(pend);
+
+		cmd->hdr.tx_ctx = (uintptr_t) pend;
+		smr_format_pend(pend, context, desc, iov, iov_count, op_flags,
+				id);
+	} else {
+		cmd->hdr.tx_ctx = 0;
+	}
 
 	smr_generic_format(cmd, peer_id, op, tag, data, op_flags);
-	smr_format_inject(cmd, desc, iov, iov_count, peer_smr, tx_buf);
+	smr_format_inject(cmd, desc, iov, iov_count, ep->region, tx_buf);
 
 	return FI_SUCCESS;
 }
 
-static ssize_t smr_do_iov(struct smr_ep *ep, struct smr_region *peer_smr, int64_t id,
-		int64_t peer_id, uint32_t op, uint64_t tag, uint64_t data,
-		uint64_t op_flags, struct ofi_mr **desc,
-		const struct iovec *iov, size_t iov_count, size_t total_len,
-		void *context, struct smr_cmd *cmd)
+static ssize_t smr_do_iov(struct smr_ep *ep, struct smr_region *peer_smr,
+			  int64_t id, int64_t peer_id, uint32_t op,
			  uint64_t tag, uint64_t data, uint64_t op_flags,
+			  struct ofi_mr **desc, const struct iovec *iov,
+			  size_t iov_count, size_t total_len, void *context,
+			  struct smr_cmd *cmd)
 {
-	struct smr_resp *resp;
 	struct smr_tx_entry *pend;
 
-	if (ofi_cirque_isfull(smr_resp_queue(ep->region)))
-		return -FI_EAGAIN;
-
-	resp = ofi_cirque_next(smr_resp_queue(ep->region));
 	pend = ofi_freestack_pop(ep->tx_fs);
+	assert(pend);
 
+	cmd->hdr.tx_ctx = (uintptr_t) pend;
 	smr_generic_format(cmd, peer_id, op, tag, data, op_flags);
-	smr_format_iov(cmd, iov, iov_count, total_len, ep->region, resp);
-	smr_format_pend_resp(pend, cmd, context, desc, iov,
-			iov_count, op_flags, id, resp);
-	ofi_cirque_commit(smr_resp_queue(ep->region));
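+	/* No resp-queue slot to reserve anymore: the peer copies
+	 * directly from this iov (CMA/xpmem) and hands the command
+	 * back on the return queue, where this pend entry completes.
+	 */
+	smr_format_iov(cmd, iov, 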
iov_count, total_len, ep->region); + + smr_format_pend(pend, context, desc, iov, iov_count, op_flags, id); return FI_SUCCESS; } -static ssize_t smr_do_sar(struct smr_ep *ep, struct smr_region *peer_smr, int64_t id, - int64_t peer_id, uint32_t op, uint64_t tag, uint64_t data, - uint64_t op_flags, struct ofi_mr **desc, - const struct iovec *iov, size_t iov_count, size_t total_len, - void *context, struct smr_cmd *cmd) +static ssize_t smr_do_sar(struct smr_ep *ep, struct smr_region *peer_smr, + int64_t id, int64_t peer_id, uint32_t op, + uint64_t tag, uint64_t data, uint64_t op_flags, + struct ofi_mr **desc, const struct iovec *iov, + size_t iov_count, size_t total_len, void *context, + struct smr_cmd *cmd) { - struct smr_resp *resp; struct smr_tx_entry *pend; int ret; - if (ofi_cirque_isfull(smr_resp_queue(ep->region))) - return -FI_EAGAIN; - - resp = ofi_cirque_next(smr_resp_queue(ep->region)); pend = ofi_freestack_pop(ep->tx_fs); + assert(pend); + + cmd->hdr.tx_ctx = (uintptr_t) pend; + smr_format_pend(pend, context, desc, iov, iov_count, op_flags, id); smr_generic_format(cmd, peer_id, op, tag, data, op_flags); ret = smr_format_sar(ep, cmd, desc, iov, iov_count, total_len, - ep->region, peer_smr, id, pend, resp); + ep->region, peer_smr, id, pend); if (ret) { ofi_freestack_push(ep->tx_fs, pend); return ret; } - smr_format_pend_resp(pend, cmd, context, desc, iov, - iov_count, op_flags, id, resp); - ofi_cirque_commit(smr_resp_queue(ep->region)); return FI_SUCCESS; } -static ssize_t smr_do_ipc(struct smr_ep *ep, struct smr_region *peer_smr, int64_t id, - int64_t peer_id, uint32_t op, uint64_t tag, uint64_t data, - uint64_t op_flags, struct ofi_mr **desc, - const struct iovec *iov, size_t iov_count, size_t total_len, - void *context, struct smr_cmd *cmd) +static ssize_t smr_do_ipc(struct smr_ep *ep, struct smr_region *peer_smr, + int64_t id, int64_t peer_id, uint32_t op, + uint64_t tag, uint64_t data, uint64_t op_flags, + struct ofi_mr **desc, const struct iovec *iov, + size_t iov_count, size_t total_len, void *context, + struct smr_cmd *cmd) { - struct smr_resp *resp; struct smr_tx_entry *pend; int ret = -FI_EAGAIN; - if (ofi_cirque_isfull(smr_resp_queue(ep->region))) - return -FI_EAGAIN; - - resp = ofi_cirque_next(smr_resp_queue(ep->region)); pend = ofi_freestack_pop(ep->tx_fs); + assert(pend); + cmd->hdr.tx_ctx = (uintptr_t) pend; smr_generic_format(cmd, peer_id, op, tag, data, op_flags); assert(iov_count == 1 && desc && desc[0]); ret = smr_format_ipc(cmd, iov[0].iov_base, total_len, ep->region, - resp, desc[0]->iface, desc[0]->device); + desc[0]->iface, desc[0]->device); if (ret) { FI_WARN_ONCE(&smr_prov, FI_LOG_EP_CTRL, - "unable to use IPC for msg, fallback to using SAR\n"); + "unable to use IPC for msg, " + "fallback to using SAR\n"); ofi_freestack_push(ep->tx_fs, pend); return smr_do_sar(ep, peer_smr, id, peer_id, op, tag, data, op_flags, desc, iov, iov_count, total_len, context, cmd); } - smr_format_pend_resp(pend, cmd, context, desc, iov, - iov_count, op_flags, id, resp); - ofi_cirque_commit(smr_resp_queue(ep->region)); + smr_format_pend(pend, context, desc, iov, iov_count, op_flags, id); return FI_SUCCESS; } -static ssize_t smr_do_mmap(struct smr_ep *ep, struct smr_region *peer_smr, int64_t id, - int64_t peer_id, uint32_t op, uint64_t tag, uint64_t data, - uint64_t op_flags, struct ofi_mr **desc, - const struct iovec *iov, size_t iov_count, size_t total_len, - void *context, struct smr_cmd *cmd) -{ - struct smr_resp *resp; - struct smr_tx_entry *pend; - int ret; - - if 
(ofi_cirque_isfull(smr_resp_queue(ep->region))) - return -FI_EAGAIN; - - resp = ofi_cirque_next(smr_resp_queue(ep->region)); - pend = ofi_freestack_pop(ep->tx_fs); - - smr_generic_format(cmd, peer_id, op, tag, data, op_flags); - ret = smr_format_mmap(ep, cmd, iov, iov_count, total_len, pend, resp); - if (ret) { - ofi_freestack_push(ep->tx_fs, pend); - return ret; - } - - smr_format_pend_resp(pend, cmd, context, desc, iov, - iov_count, op_flags, id, resp); - ofi_cirque_commit(smr_resp_queue(ep->region)); - - return FI_SUCCESS; -} - -smr_proto_func smr_proto_ops[smr_src_max] = { - [smr_src_inline] = &smr_do_inline, - [smr_src_inject] = &smr_do_inject, - [smr_src_iov] = &smr_do_iov, - [smr_src_mmap] = &smr_do_mmap, - [smr_src_sar] = &smr_do_sar, - [smr_src_ipc] = &smr_do_ipc, +smr_proto_func smr_proto_ops[smr_proto_max] = { + [smr_proto_inline] = &smr_do_inline, + [smr_proto_inject] = &smr_do_inject, + [smr_proto_iov] = &smr_do_iov, + [smr_proto_sar] = &smr_do_sar, + [smr_proto_ipc] = &smr_do_ipc, }; -static void smr_cleanup_epoll(struct smr_sock_info *sock_info) -{ - fd_signal_free(&sock_info->signal); - ofi_epoll_close(sock_info->epollfd); -} - -static void smr_free_sock_info(struct smr_ep *ep) -{ - int i, j; - - for (i = 0; i < SMR_MAX_PEERS; i++) { - if (!ep->sock_info->peers[i].device_fds) - continue; - for (j = 0; j < ep->sock_info->nfds; j++) - close(ep->sock_info->peers[i].device_fds[j]); - free(ep->sock_info->peers[i].device_fds); - } - free(ep->sock_info); - ep->sock_info = NULL; -} - static int smr_ep_close(struct fid *fid) { struct smr_ep *ep; @@ -785,15 +621,6 @@ static int smr_ep_close(struct fid *fid) if (smr_env.use_dsa_sar) smr_dsa_context_cleanup(ep); - if (ep->sock_info) { - fd_signal_set(&ep->sock_info->signal); - pthread_join(ep->sock_info->listener_thread, NULL); - close(ep->sock_info->listen_sock); - unlink(ep->sock_info->name); - smr_cleanup_epoll(ep->sock_info); - smr_free_sock_info(ep); - } - if (ep->srx) { /* shm is an owner provider */ if (ep->util_ep.ep_fid.msg != &smr_no_recv_msg_ops) @@ -850,7 +677,8 @@ static int smr_ep_bind_cq(struct smr_ep *ep, struct util_cq *cq, uint64_t flags) return ret; } -static int smr_ep_bind_cntr(struct smr_ep *ep, struct util_cntr *cntr, uint64_t flags) +static int smr_ep_bind_cntr(struct smr_ep *ep, struct util_cntr *cntr, + uint64_t flags) { int ret; @@ -868,166 +696,28 @@ static int smr_ep_bind_cntr(struct smr_ep *ep, struct util_cntr *cntr, uint64_t return FI_SUCCESS; } -static int smr_sendmsg_fd(int sock, int64_t id, int64_t peer_id, - int *fds, int nfds) -{ - struct msghdr msg; - struct cmsghdr *cmsg; - struct iovec iov; - char *ctrl_buf; - size_t ctrl_size; - int ret; - - ctrl_size = sizeof(*fds) * nfds; - ctrl_buf = calloc(CMSG_SPACE(ctrl_size), 1); - if (!ctrl_buf) - return -FI_ENOMEM; - - iov.iov_base = &peer_id; - iov.iov_len = sizeof(peer_id); - - memset(&msg, 0, sizeof(msg)); - msg.msg_control = ctrl_buf; - msg.msg_controllen = CMSG_SPACE(ctrl_size); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - cmsg = CMSG_FIRSTHDR(&msg); - cmsg->cmsg_level = SOL_SOCKET; - cmsg->cmsg_type = SCM_RIGHTS; - cmsg->cmsg_len = CMSG_LEN(ctrl_size); - memcpy(CMSG_DATA(cmsg), fds, ctrl_size); - - ret = sendmsg(sock, &msg, 0); - if (ret == sizeof(peer_id)) { - ret = FI_SUCCESS; - } else { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "sendmsg error\n"); - ret = -FI_EIO; - } - - free(ctrl_buf); - return ret; -} - -static int smr_recvmsg_fd(int sock, int64_t *peer_id, int *fds, int nfds) -{ - struct msghdr msg; - struct cmsghdr *cmsg; - struct iovec iov; 
- char *ctrl_buf; - size_t ctrl_size; - int ret; - - ctrl_size = sizeof(*fds) * nfds; - ctrl_buf = calloc(CMSG_SPACE(ctrl_size), 1); - if (!ctrl_buf) - return -FI_ENOMEM; - - iov.iov_base = peer_id; - iov.iov_len = sizeof(*peer_id); - - memset(&msg, 0, sizeof(msg)); - msg.msg_control = ctrl_buf; - msg.msg_controllen = CMSG_SPACE(ctrl_size); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - ret = recvmsg(sock, &msg, 0); - if (ret == sizeof(*peer_id)) { - ret = FI_SUCCESS; - } else { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "recvmsg error\n"); - ret = -FI_EIO; - goto out; - } - - assert(!(msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))); - cmsg = CMSG_FIRSTHDR(&msg); - assert(cmsg && cmsg->cmsg_len == CMSG_LEN(ctrl_size) && - cmsg->cmsg_level == SOL_SOCKET && - cmsg->cmsg_type == SCM_RIGHTS && CMSG_DATA(cmsg)); - memcpy(fds, CMSG_DATA(cmsg), ctrl_size); -out: - free(ctrl_buf); - return ret; -} - -void smr_ep_exchange_fds(struct smr_ep *ep, int64_t id) -{ - struct smr_region *peer_smr = smr_peer_region(ep->region, id); - struct sockaddr_un server_sockaddr = {0}, client_sockaddr = {0}; - int ret = -1, sock = -1; - int64_t peer_id; - - if (peer_smr->pid == ep->region->pid || - !(peer_smr->flags & SMR_FLAG_IPC_SOCK)) - goto out; - - sock = socket(AF_UNIX, SOCK_STREAM, 0); - if (sock < 0) - goto out; - - client_sockaddr.sun_family = AF_UNIX; - - ret = bind(sock, (struct sockaddr *) &client_sockaddr, - (socklen_t) sizeof(client_sockaddr)); - if (ret == -1) { - if (errno != EADDRINUSE) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "bind error\n"); - ep->sock_info->peers[id].state = SMR_CMAP_FAILED; - } - close(sock); - return; - } - - server_sockaddr.sun_family = AF_UNIX; - - ret = connect(sock, (struct sockaddr *) &server_sockaddr, - sizeof(server_sockaddr)); - if (ret == -1) - goto cleanup; - - FI_DBG(&smr_prov, FI_LOG_EP_CTRL, "EP connected to UNIX socket %s\n", - server_sockaddr.sun_path); - - peer_id = smr_peer_data(ep->region)[id].addr.id; - ret = smr_sendmsg_fd(sock, id, peer_id, ep->sock_info->my_fds, - ep->sock_info->nfds); - if (ret) - goto cleanup; - - if (!ep->sock_info->peers[id].device_fds) { - ep->sock_info->peers[id].device_fds = - calloc(ep->sock_info->nfds, - sizeof(*ep->sock_info->peers[id].device_fds)); - if (!ep->sock_info->peers[id].device_fds) - goto cleanup; - } - ret = smr_recvmsg_fd(sock, &id, ep->sock_info->peers[id].device_fds, - ep->sock_info->nfds); - if (ret) - goto cleanup; - -cleanup: - close(sock); - unlink(client_sockaddr.sun_path); -out: - ep->sock_info->peers[id].state = ret ? 
- SMR_CMAP_FAILED : SMR_CMAP_SUCCESS; -} - static int smr_discard(struct fi_peer_rx_entry *rx_entry) { struct smr_cmd_ctx *cmd_ctx = rx_entry->peer_context; - struct smr_region *peer_smr; - struct smr_resp *resp; + struct smr_unexp_buf *sar_buf; - if (cmd_ctx->cmd.msg.hdr.src_data >= smr_src_iov) { - peer_smr = smr_peer_region(cmd_ctx->ep->region, - cmd_ctx->cmd.msg.hdr.id); - resp = smr_get_ptr(peer_smr, cmd_ctx->cmd.msg.hdr.src_data); - resp->status = SMR_STATUS_SUCCESS; + switch (cmd_ctx->cmd->hdr.proto) { + case smr_proto_inline: + break; + case smr_proto_sar: + while (!slist_empty(&cmd_ctx->buf_list)) { + slist_remove_head_container( + &cmd_ctx->buf_list, + struct smr_unexp_buf, sar_buf, + entry); + ofi_buf_free(sar_buf); + } + break; + case smr_proto_inject: + case smr_proto_iov: + case smr_proto_ipc: + smr_return_cmd(cmd_ctx->ep, cmd_ctx->cmd); + break; } ofi_buf_free(cmd_ctx); @@ -1077,7 +767,7 @@ static int smr_ep_bind(struct fid *ep_fid, struct fid *bfid, uint64_t flags) struct util_cntr, cntr_fid.fid), flags); break; case FI_CLASS_SRX_CTX: - ep->srx = (container_of(bfid, struct smr_domain, rx_ep.fid))->srx; + ep->srx = container_of(bfid, struct smr_domain, rx_ep.fid)->srx; break; default: FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "invalid fid class\n"); @@ -1131,7 +821,8 @@ static int smr_ep_ctrl(struct fid *fid, int command, void *arg) domain = container_of(ep->util_ep.domain, struct smr_domain, util_domain.domain_fid); - ret = util_ep_srx_context(&domain->util_domain, + ret = util_ep_srx_context( + &domain->util_domain, ep->rx_size, SMR_IOV_LIMIT, ep->min_multi_recv_size, &smr_update, &ep->util_ep.lock, &srx); @@ -1151,7 +842,7 @@ static int smr_ep_ctrl(struct fid *fid, int command, void *arg) ep->util_ep.ep_fid.msg = &smr_no_recv_msg_ops; ep->util_ep.ep_fid.tagged = &smr_no_recv_tag_ops; } - smr_exchange_all_peers(ep->region); + smr_exchange_all_peers(ep); if (smr_env.use_dsa_sar) smr_dsa_context_init(ep); @@ -1278,8 +969,8 @@ int smr_endpoint(struct fid_domain *domain, struct fi_info *info, ep->rx_size = info->rx_attr->size; ep->tx_size = info->tx_attr->size; - ret = ofi_endpoint_init(domain, &smr_util_prov, info, &ep->util_ep, context, - smr_ep_progress); + ret = ofi_endpoint_init(domain, &smr_util_prov, info, &ep->util_ep, + context, smr_ep_progress); if (ret) goto name; @@ -1292,8 +983,8 @@ int smr_endpoint(struct fid_domain *domain, struct fi_info *info, ep->tx_fs = smr_tx_fs_create(info->tx_attr->size, NULL, NULL); - dlist_init(&ep->sar_list); dlist_init(&ep->ipc_cpy_pend_list); + slist_init(&ep->overflow_list); ep->min_multi_recv_size = SMR_INJECT_SIZE; diff --git a/prov/shm/src/smr_fabric.c b/prov/shm/src/smr_fabric.c index 854f52b1440..fab9b2b583f 100644 --- a/prov/shm/src/smr_fabric.c +++ b/prov/shm/src/smr_fabric.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Intel Corporation, Inc. All rights reserved. + * Copyright (c) Intel Corporation, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/prov/shm/src/smr_init.c b/prov/shm/src/smr_init.c index 076384a7ba6..4353c0de8df 100644 --- a/prov/shm/src/smr_init.c +++ b/prov/shm/src/smr_init.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2021 Intel Corporation. All rights reserved. + * Copyright (c) Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -101,9 +101,8 @@ static int smr_shm_space_check(size_t tx_count, size_t rx_count) } shm_size_needed = num_of_core * smr_calculate_size_offsets(tx_count, rx_count, - NULL, NULL, NULL, - NULL, NULL, NULL, - NULL); + NULL, NULL, NULL, NULL, + NULL, NULL); err = statvfs(shm_fs, &stat); if (err) { FI_WARN(&smr_prov, FI_LOG_CORE, @@ -139,7 +138,8 @@ static int smr_getinfo(uint32_t version, const char *node, const char *service, if (ret) return ret; - ret = smr_shm_space_check((*info)->tx_attr->size, (*info)->rx_attr->size); + ret = smr_shm_space_check((*info)->tx_attr->size, + (*info)->rx_attr->size); if (ret) { fi_freeinfo(*info); return ret; @@ -147,15 +147,18 @@ static int smr_getinfo(uint32_t version, const char *node, const char *service, for (cur = *info; cur; cur = cur->next) { if (!(flags & FI_SOURCE) && !cur->dest_addr) - smr_resolve_addr(node, service, (char **) &cur->dest_addr, + smr_resolve_addr(node, service, + (char **) &cur->dest_addr, &cur->dest_addrlen); if (!cur->src_addr) { if (flags & FI_SOURCE) - smr_resolve_addr(node, service, (char **) &cur->src_addr, + smr_resolve_addr(node, service, + (char **) &cur->src_addr, &cur->src_addrlen); else - smr_resolve_addr(NULL, NULL, (char **) &cur->src_addr, + smr_resolve_addr(NULL, NULL, + (char **) &cur->src_addr, &cur->src_addrlen); } if (fast_rma) { diff --git a/prov/shm/src/smr_msg.c b/prov/shm/src/smr_msg.c index 8adfa7d11e8..4994d98824c 100644 --- a/prov/shm/src/smr_msg.c +++ b/prov/shm/src/smr_msg.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2021 Intel Corporation. All rights reserved + * Copyright (c) Intel Corporation. All rights reserved * (C) Copyright 2021 Amazon.com, Inc. or its affiliates. * * This software is available to you under a choice of one of two @@ -73,17 +73,18 @@ static ssize_t smr_recv(struct fid_ep *ep_fid, void *buf, size_t len, } static ssize_t smr_generic_sendmsg(struct smr_ep *ep, const struct iovec *iov, - void **desc, size_t iov_count, fi_addr_t addr, - uint64_t tag, uint64_t data, void *context, - uint32_t op, uint64_t op_flags) + void **desc, size_t iov_count, + fi_addr_t addr, uint64_t tag, uint64_t data, + void *context, uint32_t op, + uint64_t op_flags) { struct smr_region *peer_smr; - int64_t id, peer_id; + int64_t id, peer_id, pos; ssize_t ret = 0; size_t total_len; int proto; struct smr_cmd_entry *ce; - int64_t pos; + struct smr_cmd *cmd; assert(iov_count <= SMR_IOV_LIMIT); @@ -91,8 +92,8 @@ static ssize_t smr_generic_sendmsg(struct smr_ep *ep, const struct iovec *iov, if (id < 0) return -FI_EAGAIN; - peer_id = smr_peer_data(ep->region)[id].addr.id; - peer_smr = smr_peer_region(ep->region, id); + peer_id = smr_peer_data(ep->region)[id].id; + peer_smr = smr_peer_region(ep, id); if (smr_peer_data(ep->region)[id].sar_status) return -FI_EAGAIN; @@ -110,16 +111,33 @@ static ssize_t smr_generic_sendmsg(struct smr_ep *ep, const struct iovec *iov, smr_ipc_valid(ep, peer_smr, id, peer_id), op, total_len, op_flags); - ret = smr_proto_ops[proto](ep, peer_smr, id, peer_id, op, tag, data, op_flags, - (struct ofi_mr **)desc, iov, iov_count, total_len, - context, &ce->cmd); + if (proto != smr_proto_inline) { + if (smr_freestack_isempty(smr_cmd_stack(ep->region))) { + smr_cmd_queue_discard(ce, pos); + ret = -FI_EAGAIN; + goto unlock; + } + + cmd = smr_freestack_pop(smr_cmd_stack(ep->region)); + assert(cmd); + ce->ptr = smr_local_to_peer(ep, id, peer_id, (uintptr_t) cmd); + } else { + cmd = &ce->cmd; + ce->ptr = smr_peer_to_peer(ep, id, (uintptr_t) 
&ce->cmd);
+	}
+
+	ret = smr_proto_ops[proto](ep, peer_smr, id, peer_id, op, tag, data,
+				   op_flags, (struct ofi_mr **) desc, iov,
+				   iov_count, total_len, context, cmd);
 	if (ret) {
 		smr_cmd_queue_discard(ce, pos);
+		if (proto != smr_proto_inline)
+			smr_freestack_push(smr_cmd_stack(ep->region), cmd);
 		goto unlock;
 	}
 
 	smr_cmd_queue_commit(ce, pos);
 
-	if (proto != smr_src_inline && proto != smr_src_inject)
+	if (proto != smr_proto_inline && proto != smr_proto_inject)
 		goto unlock;
 
 	ret = smr_complete_tx(ep, context, op, op_flags);
@@ -169,7 +187,8 @@ static ssize_t smr_sendmsg(struct fid_ep *ep_fid, const struct fi_msg *msg,
 
 	return smr_generic_sendmsg(ep, msg->msg_iov, msg->desc, msg->iov_count,
 				   msg->addr, 0, msg->data, msg->context,
-				   ofi_op_msg, flags | ep->util_ep.tx_msg_flags);
+				   ofi_op_msg,
+				   flags | ep->util_ep.tx_msg_flags);
 }
 
 static ssize_t smr_generic_inject(struct fid_ep *ep_fid, const void *buf,
@@ -178,12 +197,12 @@ static ssize_t smr_generic_inject(struct fid_ep *ep_fid, const void *buf,
 {
 	struct smr_ep *ep;
 	struct smr_region *peer_smr;
-	int64_t id, peer_id;
+	int64_t id, peer_id, pos;
 	ssize_t ret = 0;
 	struct iovec msg_iov;
 	int proto;
 	struct smr_cmd_entry *ce;
-	int64_t pos;
+	struct smr_cmd *cmd;
 
 	assert(len <= SMR_INJECT_SIZE);
 
@@ -196,27 +215,53 @@ static ssize_t smr_generic_inject(struct fid_ep *ep_fid, const void *buf,
 	if (id < 0)
 		return -FI_EAGAIN;
 
-	peer_id = smr_peer_data(ep->region)[id].addr.id;
-	peer_smr = smr_peer_region(ep->region, id);
+	peer_id = smr_peer_data(ep->region)[id].id;
+	peer_smr = smr_peer_region(ep, id);
 
-	if (smr_peer_data(ep->region)[id].sar_status)
-		return -FI_EAGAIN;
+	ofi_genlock_lock(&ep->util_ep.lock);
+	if (smr_peer_data(ep->region)[id].sar_status) {
+		ret = -FI_EAGAIN;
+		goto unlock;
+	}
 
 	ret = smr_cmd_queue_next(smr_cmd_queue(peer_smr), &ce, &pos);
-	if (ret == -FI_ENOENT)
-		return -FI_EAGAIN;
+	if (ret == -FI_ENOENT) {
+		ret = -FI_EAGAIN;
+		goto unlock;
+	}
+
+	if (len <= SMR_MSG_DATA_LEN) {
+		proto = smr_proto_inline;
+		cmd = &ce->cmd;
+		ce->ptr = smr_peer_to_peer(ep, id, (uintptr_t) &ce->cmd);
+	} else {
+		proto = smr_proto_inject;
+		if (smr_freestack_isempty(smr_cmd_stack(ep->region))) {
+			smr_cmd_queue_discard(ce, pos);
+			ret = -FI_EAGAIN;
+			goto unlock;
+		}
+
+		cmd = smr_freestack_pop(smr_cmd_stack(ep->region));
+		assert(cmd);
+		ce->ptr = smr_local_to_peer(ep, id, peer_id, (uintptr_t) cmd);
+	}
 
-	proto = len <= SMR_MSG_DATA_LEN ? smr_src_inline : smr_src_inject;
 	ret = smr_proto_ops[proto](ep, peer_smr, id, peer_id, op, tag, data,
-			op_flags, NULL, &msg_iov, 1, len, NULL, &ce->cmd);
+				   op_flags, NULL, &msg_iov, 1, len, NULL, cmd);
 	if (ret) {
 		smr_cmd_queue_discard(ce, pos);
-		return -FI_EAGAIN;
+		if (proto != smr_proto_inline)
+			smr_freestack_push(smr_cmd_stack(ep->region), cmd);
+		ret = -FI_EAGAIN;
+		goto unlock;
 	}
 
 	smr_cmd_queue_commit(ce, pos);
 	ofi_ep_peer_tx_cntr_inc(&ep->util_ep, op);
 
-	return FI_SUCCESS;
+unlock:
+	ofi_genlock_unlock(&ep->util_ep.lock);
+	return ret;
 }
 
 static ssize_t smr_inject(struct fid_ep *ep_fid, const void *buf, size_t len,
diff --git a/prov/shm/src/smr_progress.c b/prov/shm/src/smr_progress.c
index e91035a50d9..9013205c2f5 100644
--- a/prov/shm/src/smr_progress.c
+++ b/prov/shm/src/smr_progress.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2020 Intel Corporation. All rights reserved
+ * Copyright (c) Intel Corporation. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses. 
You may choose to be licensed under the terms of the GNU
@@ -32,200 +32,184 @@
 #include "smr.h"
 
-static inline void
-smr_try_progress_to_sar(struct smr_ep *ep, struct smr_region *smr,
-			struct smr_freestack *sar_pool, struct smr_resp *resp,
-			struct smr_cmd *cmd, struct ofi_mr **mr,
-			struct iovec *iov, size_t iov_count,
-			size_t *bytes_done, void *entry_ptr)
+static void smr_progress_overflow(struct smr_ep *ep)
 {
-	if (*bytes_done < cmd->msg.hdr.size) {
-		if (smr_env.use_dsa_sar && ofi_mr_all_host(mr, iov_count)) {
-			(void) smr_dsa_copy_to_sar(ep, sar_pool, resp, cmd, iov,
-					iov_count, bytes_done, entry_ptr);
+	struct smr_cmd_entry *ce;
+	struct smr_tx_entry *pending;
+	struct smr_region *peer_smr;
+	struct smr_cmd *cmd;
+	int64_t pos;
+	struct slist_entry *entry;
+	int ret;
+
+	entry = ep->overflow_list.head;
+	while (entry) {
+		/* hdr.entry serves as the slist link; the cast assumes
+		 * it stays the first field of struct smr_cmd
+		 */
+		cmd = (struct smr_cmd *) entry;
+		pending = (struct smr_tx_entry *) cmd->hdr.tx_ctx;
+		peer_smr = smr_peer_region(ep, pending->peer_id);
+		ret = smr_cmd_queue_next(smr_cmd_queue(peer_smr), &ce, &pos);
+		if (ret == -FI_ENOENT)
 			return;
-		} else {
-			smr_copy_to_sar(sar_pool, resp, cmd, mr, iov, iov_count,
-					bytes_done);
-		}
+
+		ce->ptr = smr_local_to_peer(ep, pending->peer_id, cmd->hdr.id,
+					    (uintptr_t) cmd);
+
+		slist_remove_head(&ep->overflow_list);
+		smr_cmd_queue_commit(ce, pos);
+		entry = ep->overflow_list.head;
 	}
 }
 
-static inline void
-smr_try_progress_from_sar(struct smr_ep *ep, struct smr_region *smr,
-			struct smr_freestack *sar_pool, struct smr_resp *resp,
-			struct smr_cmd *cmd, struct ofi_mr **mr,
-			struct iovec *iov, size_t iov_count,
-			size_t *bytes_done, void *entry_ptr)
+static void smr_try_send_cmd(struct smr_ep *ep, struct smr_cmd *cmd)
 {
-	if (*bytes_done < cmd->msg.hdr.size) {
-		if (smr_env.use_dsa_sar && ofi_mr_all_host(mr, iov_count)) {
-			(void) smr_dsa_copy_from_sar(ep, sar_pool, resp, cmd,
-					iov, iov_count, bytes_done, entry_ptr);
-			return;
-		} else {
-			smr_copy_from_sar(sar_pool, resp, cmd, mr,
-					iov, iov_count, bytes_done);
-		}
-	}
+	cmd->hdr.entry = 0;
+	slist_insert_tail((struct slist_entry *) &cmd->hdr.entry,
+			  &ep->overflow_list);
+
+	smr_progress_overflow(ep);
 }
 
-static int smr_progress_resp_entry(struct smr_ep *ep, struct smr_resp *resp,
-				   struct smr_tx_entry *pending, uint64_t *err)
+static inline void smr_free_sar_bufs(struct smr_ep *ep, struct smr_cmd *cmd,
+				     struct smr_tx_entry *pending)
 {
 	int i;
-	struct smr_region *peer_smr;
-	size_t inj_offset;
+
+	for (i = cmd->data.buf_batch_size - 1; i >= 0; i--) {
+		smr_freestack_push_by_index(smr_sar_pool(ep->region),
+					    cmd->data.sar[i]);
+	}
+	smr_peer_data(ep->region)[pending->peer_id].sar_status = 0;
+}
+
+static int smr_progress_return_entry(struct smr_ep *ep, struct smr_cmd *cmd,
+				     struct smr_tx_entry *pending)
+{
 	struct smr_inject_buf *tx_buf = NULL;
-	struct smr_sar_buf *sar_buf = NULL;
 	uint8_t *src;
 	ssize_t hmem_copy_ret;
+	int ret = FI_SUCCESS;
 
-	peer_smr = smr_peer_region(ep->region, pending->peer_id);
-
-	switch (pending->cmd.msg.hdr.op_src) {
-	case smr_src_iov:
+	switch (cmd->hdr.proto) {
+	case smr_proto_iov:
 		break;
-	case smr_src_ipc:
+	case smr_proto_ipc:
 		assert(pending->mr[0]);
 		break;
-	case smr_src_sar:
-		sar_buf = smr_freestack_get_entry_from_index(
-				smr_sar_pool(peer_smr), pending->cmd.msg.data.sar[0]);
-		if (pending->bytes_done == pending->cmd.msg.hdr.size &&
-		    (resp->status == SMR_STATUS_SAR_EMPTY ||
-		     resp->status == SMR_STATUS_SUCCESS)) {
-			resp->status = SMR_STATUS_SUCCESS;
-			break;
+	case smr_proto_sar:
+		if (cmd->hdr.status) {
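+			/* Peer reported an error: reclaim this
+			 * command's SAR buffers and surface the
+			 * failure to the completion path.
+			 */
+			smr_free_sar_bufs(ep, cmd, 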
pending); + return cmd->hdr.status; } - if (pending->cmd.msg.hdr.op == ofi_op_read_req) - smr_try_progress_from_sar(ep, peer_smr, - smr_sar_pool(peer_smr), resp, - &pending->cmd, pending->mr, pending->iov, - pending->iov_count, &pending->bytes_done, - pending); - else - smr_try_progress_to_sar(ep, peer_smr, - smr_sar_pool(peer_smr), resp, - &pending->cmd, pending->mr, pending->iov, - pending->iov_count, &pending->bytes_done, - pending); - if (pending->bytes_done != pending->cmd.msg.hdr.size || - resp->status != SMR_STATUS_SAR_EMPTY) { + if (cmd->hdr.op == ofi_op_read_req) { + ret = smr_copy_from_sar(ep, smr_sar_pool(ep->region), + cmd, pending->mr, pending->iov, + pending->iov_count, + &pending->bytes_done); + if (pending->bytes_done == cmd->hdr.size) { + smr_free_sar_bufs(ep, cmd, pending); + return FI_SUCCESS; + } + smr_try_send_cmd(ep, cmd); return -FI_EAGAIN; } - resp->status = SMR_STATUS_SUCCESS; - break; - case smr_src_mmap: - if (!pending->map_name) - break; - if (pending->cmd.msg.hdr.op == ofi_op_read_req) { - if (!*err) { - hmem_copy_ret = - ofi_copy_to_mr_iov(pending->mr, - pending->iov, - pending->iov_count, - 0, pending->map_ptr, - pending->cmd.msg.hdr.size); + if (pending->bytes_done == cmd->hdr.size) { + smr_free_sar_bufs(ep, cmd, pending); + return FI_SUCCESS; + } + + ret = smr_copy_to_sar(ep, smr_sar_pool(ep->region), cmd, + pending->mr, pending->iov, + pending->iov_count, &pending->bytes_done); + + smr_try_send_cmd(ep, cmd); + return -FI_EAGAIN; + case smr_proto_inject: + tx_buf = smr_get_ptr(ep->region, cmd->hdr.proto_data); + if (pending) { + if (pending->bytes_done != cmd->hdr.size && + cmd->hdr.op != ofi_op_atomic) { + src = cmd->hdr.op == ofi_op_atomic_compare ? + tx_buf->buf : tx_buf->data; + hmem_copy_ret = ofi_copy_to_mr_iov( + pending->mr, + pending->iov, + pending->iov_count, + 0, src,cmd->hdr.size); + if (hmem_copy_ret < 0) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "Copy from mmapped file failed with code %d\n", + "RMA read/fetch failed " + "with code %d\n", (int)(-hmem_copy_ret)); - *err = hmem_copy_ret; - } else if (hmem_copy_ret != pending->cmd.msg.hdr.size) { + ret = hmem_copy_ret; + } else if (hmem_copy_ret != cmd->hdr.size) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "Incomplete copy from mmapped file\n"); - *err = -FI_ETRUNC; + "Incomplete rma read/fetch " + "buffer copied\n"); + ret = -FI_ETRUNC; } else { - pending->bytes_done = (size_t) hmem_copy_ret; + pending->bytes_done = + (size_t) hmem_copy_ret; } } - munmap(pending->map_ptr, pending->cmd.msg.hdr.size); - } - shm_unlink(pending->map_name->name); - dlist_remove(&pending->map_name->entry); - free(pending->map_name); - pending->map_name = NULL; - break; - case smr_src_inject: - inj_offset = (size_t) pending->cmd.msg.hdr.src_data; - tx_buf = smr_get_ptr(peer_smr, inj_offset); - if (*err || pending->bytes_done == pending->cmd.msg.hdr.size || - pending->cmd.msg.hdr.op == ofi_op_atomic) - break; - - src = pending->cmd.msg.hdr.op == ofi_op_atomic_compare ? 
- tx_buf->buf : tx_buf->data; - hmem_copy_ret = ofi_copy_to_mr_iov(pending->mr, - pending->iov, pending->iov_count, - 0, src, pending->cmd.msg.hdr.size); - - if (hmem_copy_ret < 0) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "RMA read/fetch failed with code %d\n", - (int)(-hmem_copy_ret)); - *err = hmem_copy_ret; - } else if (hmem_copy_ret != pending->cmd.msg.hdr.size) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "Incomplete rma read/fetch buffer copied\n"); - *err = -FI_ETRUNC; - } else { - pending->bytes_done = (size_t) hmem_copy_ret; } + smr_freestack_push(smr_inject_pool(ep->region), tx_buf); break; default: FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "unidentified operation type\n"); } - if (tx_buf) { - smr_release_txbuf(peer_smr, tx_buf); - } else if (sar_buf) { - pthread_spin_lock(&peer_smr->lock); - for (i = pending->cmd.msg.data.buf_batch_size - 1; i >= 0; i--) { - smr_freestack_push_by_index(smr_sar_pool(peer_smr), - pending->cmd.msg.data.sar[i]); - } - pthread_spin_unlock(&peer_smr->lock); - smr_peer_data(ep->region)[pending->peer_id].sar_status = 0; - } - - return FI_SUCCESS; + return ret; } -static void smr_progress_resp(struct smr_ep *ep) +static void smr_progress_return(struct smr_ep *ep) { - struct smr_resp *resp; + struct smr_return_entry *queue_entry; + struct smr_cmd *cmd; struct smr_tx_entry *pending; + int64_t pos; int ret; ofi_genlock_lock(&ep->util_ep.lock); - while (!ofi_cirque_isempty(smr_resp_queue(ep->region))) { - resp = ofi_cirque_head(smr_resp_queue(ep->region)); - if (resp->status == SMR_STATUS_BUSY) - break; - - pending = (struct smr_tx_entry *) resp->msg_id; - if (smr_progress_resp_entry(ep, resp, pending, &resp->status)) + while (1) { + ret = smr_return_queue_head(smr_return_queue(ep->region), + &queue_entry, &pos); + if (ret == -FI_ENOENT) break; - if (resp->status) { - ret = smr_write_err_comp(ep->util_ep.tx_cq, pending->context, - pending->op_flags, pending->cmd.msg.hdr.tag, - resp->status); - } else { - ret = smr_complete_tx(ep, pending->context, - pending->cmd.msg.hdr.op, pending->op_flags); - } - if (ret) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "unable to process tx completion\n"); - break; + cmd = (struct smr_cmd *) queue_entry->ptr; + pending = (struct smr_tx_entry *) cmd->hdr.tx_ctx; + + ret = smr_progress_return_entry(ep, cmd, pending); + if (ret != -FI_EAGAIN) { + if (pending) { + if (cmd->hdr.status) { + ret = smr_write_err_comp( + ep->util_ep.tx_cq, + pending->context, + pending->op_flags, + cmd->hdr.tag, + cmd->hdr.status); + } else { + ret = smr_complete_tx( + ep, pending->context, + cmd->hdr.op, + pending->op_flags); + } + if (ret) { + FI_WARN(&smr_prov, FI_LOG_EP_CTRL, + "unable to process " + "tx completion\n"); + } + ofi_freestack_push(ep->tx_fs, pending); + } + smr_freestack_push(smr_cmd_stack(ep->region), cmd); } - ofi_freestack_push(ep->tx_fs, pending); - ofi_cirque_discard(smr_resp_queue(ep->region)); + smr_return_queue_release(smr_return_queue(ep->region), + queue_entry, pos); } ofi_genlock_unlock(&ep->util_ep.lock); } @@ -237,13 +221,13 @@ static int smr_progress_inline(struct smr_cmd *cmd, struct ofi_mr **mr, ssize_t hmem_copy_ret; hmem_copy_ret = ofi_copy_to_mr_iov(mr, iov, iov_count, 0, - cmd->msg.data.msg, cmd->msg.hdr.size); + cmd->data.msg, cmd->hdr.size); if (hmem_copy_ret < 0) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "inline recv failed with code %d\n", (int)(-hmem_copy_ret)); return hmem_copy_ret; - } else if (hmem_copy_ret != cmd->msg.hdr.size) { + } else if (hmem_copy_ret != cmd->hdr.size) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "inline recv 
truncated\n"); return -FI_ETRUNC; @@ -258,38 +242,44 @@ static int smr_progress_inject(struct smr_cmd *cmd, struct ofi_mr **mr, struct iovec *iov, size_t iov_count, size_t *total_len, struct smr_ep *ep, int err) { + struct smr_region *peer_smr; struct smr_inject_buf *tx_buf; size_t inj_offset; ssize_t hmem_copy_ret; - assert(cmd->msg.hdr.op != ofi_op_read_req); + inj_offset = (size_t) cmd->hdr.proto_data; - inj_offset = (size_t) cmd->msg.hdr.src_data; - tx_buf = smr_get_ptr(ep->region, inj_offset); + peer_smr = smr_peer_region(ep, cmd->hdr.id); + tx_buf = smr_get_ptr(peer_smr, inj_offset); + if (err) + goto out; - if (err) { - smr_release_txbuf(ep->region, tx_buf); - return err; + if (cmd->hdr.op == ofi_op_read_req) { + hmem_copy_ret = ofi_copy_from_mr_iov(tx_buf->data, + cmd->hdr.size, mr, iov, + iov_count, 0); + } else { + hmem_copy_ret = ofi_copy_to_mr_iov(mr, iov, iov_count, 0, + tx_buf->data, cmd->hdr.size); } - hmem_copy_ret = ofi_copy_to_mr_iov(mr, iov, iov_count, 0, tx_buf->data, - cmd->msg.hdr.size); - smr_release_txbuf(ep->region, tx_buf); - if (hmem_copy_ret < 0) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "inject recv failed with code %d\n", (int)(-hmem_copy_ret)); - return hmem_copy_ret; - } else if (hmem_copy_ret != cmd->msg.hdr.size) { + err = hmem_copy_ret; + } else if (hmem_copy_ret != cmd->hdr.size) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "inject recv truncated\n"); - return -FI_ETRUNC; + err = -FI_ETRUNC; + } else { + *total_len = hmem_copy_ret; + err = FI_SUCCESS; } - *total_len = hmem_copy_ret; - - return FI_SUCCESS; +out: + smr_return_cmd(ep, cmd); + return err; } static int smr_progress_iov(struct smr_cmd *cmd, struct iovec *iov, @@ -298,105 +288,134 @@ static int smr_progress_iov(struct smr_cmd *cmd, struct iovec *iov, { struct smr_region *peer_smr; struct ofi_xpmem_client *xpmem; - struct smr_resp *resp; int ret; - peer_smr = smr_peer_region(ep->region, cmd->msg.hdr.id); - resp = smr_get_ptr(peer_smr, cmd->msg.hdr.src_data); + peer_smr = smr_peer_region(ep, cmd->hdr.id); - xpmem = &smr_peer_data(ep->region)[cmd->msg.hdr.id].xpmem; + xpmem = &smr_peer_data(ep->region)[cmd->hdr.id].xpmem; - ret = ofi_shm_p2p_copy(ep->p2p_type, iov, iov_count, cmd->msg.data.iov, - cmd->msg.data.iov_count, cmd->msg.hdr.size, - peer_smr->pid, cmd->msg.hdr.op == ofi_op_read_req, + ret = ofi_shm_p2p_copy(ep->p2p_type, iov, iov_count, cmd->data.iov, + cmd->data.iov_count, cmd->hdr.size, + peer_smr->pid, cmd->hdr.op == ofi_op_read_req, xpmem); if (!ret) - *total_len = cmd->msg.hdr.size; + *total_len = cmd->hdr.size; - //Status must be set last (signals peer: op done, valid resp entry) - resp->status = -ret; + cmd->hdr.status = ret; + smr_return_cmd(ep, cmd); return ret; } -static int smr_mmap_peer_copy(struct smr_ep *ep, struct smr_cmd *cmd, - struct ofi_mr **mr, struct iovec *iov, - size_t iov_count, size_t *total_len) +static void smr_buffer_sar(struct smr_ep *ep, struct smr_pend_entry *sar_entry, + struct smr_cmd *cmd) { - char shm_name[SMR_NAME_MAX]; - void *mapped_ptr; - int fd, num; - int ret = 0; - ssize_t hmem_copy_ret; + struct smr_region *peer_smr; + struct smr_sar_buf *sar_buf; + struct smr_unexp_buf *buf; + size_t bytes; + int next_buf = 0; - num = smr_mmap_name(shm_name, - ep->region->map->peers[cmd->msg.hdr.id].peer.name, - cmd->msg.hdr.msg_id); - if (num < 0) { - FI_WARN(&smr_prov, FI_LOG_AV, "generating shm file name failed\n"); - return -errno; - } + peer_smr = smr_peer_region(ep, cmd->hdr.id); - fd = shm_open(shm_name, O_RDWR, S_IRUSR | S_IWUSR); - if (fd < 0) { - FI_WARN(&smr_prov, 
FI_LOG_AV, "shm_open error\n"); - return -errno; - } + while (next_buf < cmd->data.buf_batch_size && + sar_entry->bytes_done < cmd->hdr.size) { + buf = ofi_buf_alloc(ep->unexp_buf_pool); + if (!buf) { + FI_WARN(&smr_prov, FI_LOG_EP_CTRL, + "Error allocating buffer for unexpected SAR " + "(-FI_ENOMEM)\n"); + return; + } + slist_insert_tail(&buf->entry, &sar_entry->cmd_ctx->buf_list); - mapped_ptr = mmap(NULL, cmd->msg.hdr.size, PROT_READ | PROT_WRITE, - MAP_SHARED, fd, 0); - if (mapped_ptr == MAP_FAILED) { - FI_WARN(&smr_prov, FI_LOG_AV, "mmap error %s\n", strerror(errno)); - ret = -errno; - goto unlink_close; - } + sar_buf = smr_freestack_get_entry_from_index( + smr_sar_pool(peer_smr), + cmd->data.sar[next_buf]); + bytes = MIN(cmd->hdr.size - sar_entry->bytes_done, + SMR_SAR_SIZE); - if (cmd->msg.hdr.op == ofi_op_read_req) { - hmem_copy_ret = ofi_copy_from_mr_iov(mapped_ptr, - cmd->msg.hdr.size, mr, iov, - iov_count, 0); - } else { - hmem_copy_ret = ofi_copy_to_mr_iov(mr, iov, iov_count, 0, - mapped_ptr, cmd->msg.hdr.size); - } + memcpy(buf->buf, sar_buf->buf, bytes); - if (hmem_copy_ret < 0) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "mmap copy iov failed with code %d\n", - (int)(-hmem_copy_ret)); - ret = hmem_copy_ret; - } else if (hmem_copy_ret != cmd->msg.hdr.size) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "mmap copy iov truncated\n"); - ret = -FI_ETRUNC; + sar_entry->bytes_done += bytes; + next_buf++; } - - *total_len = hmem_copy_ret; - - munmap(mapped_ptr, cmd->msg.hdr.size); -unlink_close: - shm_unlink(shm_name); - close(fd); - return ret; } -static int smr_progress_mmap(struct smr_cmd *cmd, struct ofi_mr **mr, - struct iovec *iov, size_t iov_count, - size_t *total_len, struct smr_ep *ep) +static int smr_progress_pending_sar(struct smr_ep *ep, struct smr_cmd *cmd) { + struct smr_pend_entry *sar_entry; struct smr_region *peer_smr; - struct smr_resp *resp; + void *comp_ctx; + uint64_t comp_flags; int ret; - peer_smr = smr_peer_region(ep->region, cmd->msg.hdr.id); - resp = smr_get_ptr(peer_smr, cmd->msg.hdr.src_data); + sar_entry = (struct smr_pend_entry *) cmd->hdr.rx_ctx; + if (sar_entry->cmd_ctx) { + smr_buffer_sar(ep, sar_entry, cmd); + goto out; + } - ret = smr_mmap_peer_copy(ep, cmd, mr, iov, iov_count, total_len); + peer_smr = smr_peer_region(ep, cmd->hdr.id); - //Status must be set last (signals peer: op done, valid resp entry) - resp->status = -ret; + if (cmd->hdr.op == ofi_op_read_req) + ret = smr_copy_to_sar(ep, smr_sar_pool(peer_smr), cmd, + sar_entry->mr, sar_entry->iov, + sar_entry->iov_count, + &sar_entry->bytes_done); + else + ret = smr_copy_from_sar(ep, smr_sar_pool(peer_smr), cmd, + sar_entry->mr, sar_entry->iov, + sar_entry->iov_count, + &sar_entry->bytes_done); + if (ret) + cmd->hdr.status = ret; - return -ret; + if (sar_entry->bytes_done == cmd->hdr.size || ret) { + if (sar_entry->rx_entry) { + comp_ctx = sar_entry->rx_entry->context; + comp_flags = smr_rx_cq_flags( + sar_entry->rx_entry->flags, + cmd->hdr.op_flags); + } else { + comp_ctx = NULL; + comp_flags = smr_rx_cq_flags(0, cmd->hdr.op_flags); + } + if (ret) { + ret = smr_write_err_comp(ep->util_ep.rx_cq, + comp_ctx, comp_flags, + cmd->hdr.tag, ret); + } else { + ret = smr_complete_rx(ep, comp_ctx, + cmd->hdr.op, comp_flags, + sar_entry->bytes_done, + sar_entry->iov[0].iov_base, + cmd->hdr.id, cmd->hdr.tag, + cmd->hdr.cq_data); + } + if (ret) { + FI_WARN(&smr_prov, FI_LOG_EP_CTRL, + "unable to process rx completion\n"); + } + if (sar_entry->rx_entry) + ep->srx->owner_ops->free_entry(sar_entry->rx_entry); + + 
ofi_buf_free(sar_entry); + } + +out: + smr_return_cmd(ep, cmd); + return FI_SUCCESS; +} + +static int smr_progress_pending(struct smr_ep *ep, struct smr_cmd *cmd) +{ + switch (cmd->hdr.proto) { + case smr_proto_sar: + return smr_progress_pending_sar(ep, cmd); + default: + return -FI_EINVAL; + } } static struct smr_pend_entry *smr_progress_sar(struct smr_cmd *cmd, @@ -405,42 +424,37 @@ static struct smr_pend_entry *smr_progress_sar(struct smr_cmd *cmd, size_t *total_len, struct smr_ep *ep) { struct smr_region *peer_smr; - struct smr_pend_entry *sar_entry; - struct smr_resp *resp; + struct smr_pend_entry *sar_entry = NULL; struct iovec sar_iov[SMR_IOV_LIMIT]; + size_t bytes_done = 0; + int ret; - peer_smr = smr_peer_region(ep->region, cmd->msg.hdr.id); - resp = smr_get_ptr(peer_smr, cmd->msg.hdr.src_data); + peer_smr = smr_peer_region(ep, cmd->hdr.id); /* Nothing to do for 0 byte transfer */ - if (!cmd->msg.hdr.size) { - resp->status = SMR_STATUS_SUCCESS; - return NULL; - } + if (!cmd->hdr.size) + goto out; memcpy(sar_iov, iov, sizeof(*iov) * iov_count); - (void) ofi_truncate_iov(sar_iov, &iov_count, cmd->msg.hdr.size); - - sar_entry = ofi_buf_alloc(ep->pend_buf_pool); - dlist_insert_tail(&sar_entry->entry, &ep->sar_list); + (void) ofi_truncate_iov(sar_iov, &iov_count, cmd->hdr.size); - if (cmd->msg.hdr.op == ofi_op_read_req) - smr_try_progress_to_sar(ep, peer_smr, smr_sar_pool(ep->region), - resp, cmd, mr, sar_iov, iov_count, - total_len, sar_entry); + if (cmd->hdr.op == ofi_op_read_req) + ret = smr_copy_to_sar(ep, smr_sar_pool(peer_smr), cmd, + mr, sar_iov, iov_count, &bytes_done); else - smr_try_progress_from_sar(ep, peer_smr, - smr_sar_pool(ep->region), resp, cmd, mr, - sar_iov, iov_count, total_len, sar_entry); + ret = smr_copy_from_sar(ep, smr_sar_pool(peer_smr), cmd, + mr, sar_iov, iov_count, &bytes_done); + if (ret) + cmd->hdr.status = ret; - if (*total_len == cmd->msg.hdr.size) { - dlist_remove(&sar_entry->entry); - ofi_buf_free(sar_entry); - return NULL; - } - sar_entry->cmd = *cmd; + if (bytes_done == cmd->hdr.size) + goto out; + + sar_entry = ofi_buf_alloc(ep->pend_buf_pool); + + cmd->hdr.rx_ctx = (uintptr_t) sar_entry; + sar_entry->bytes_done = bytes_done; sar_entry->cmd_ctx = NULL; - sar_entry->bytes_done = *total_len; memcpy(sar_entry->iov, sar_iov, sizeof(*sar_iov) * iov_count); sar_entry->iov_count = iov_count; sar_entry->rx_entry = rx_entry ? 
rx_entry : NULL; @@ -449,35 +463,36 @@ static struct smr_pend_entry *smr_progress_sar(struct smr_cmd *cmd, else memset(sar_entry->mr, 0, sizeof(*mr) * iov_count); - *total_len = cmd->msg.hdr.size; + *total_len = cmd->hdr.size; +out: + smr_return_cmd(ep, cmd); return sar_entry; } -static int -smr_ipc_async_copy(struct smr_ep *ep, void *ptr, - struct fi_peer_rx_entry *rx_entry, - struct iovec *iov, size_t iov_count, - struct ofi_mr_entry *mr_entry, struct smr_cmd *cmd, - struct smr_pend_entry **pend) +static int smr_ipc_async_copy(struct smr_ep *ep, void *ptr, + struct fi_peer_rx_entry *rx_entry, + struct iovec *iov, size_t iov_count, + struct ofi_mr_entry *mr_entry, + struct smr_cmd *cmd, struct smr_pend_entry **pend) { struct smr_pend_entry *ipc_entry; - enum fi_hmem_iface iface = cmd->msg.data.ipc_info.iface; - uint64_t device = cmd->msg.data.ipc_info.device; + enum fi_hmem_iface iface = cmd->data.ipc_info.iface; + uint64_t device = cmd->data.ipc_info.device; int ret; ipc_entry = ofi_buf_alloc(ep->pend_buf_pool); if (!ipc_entry) return -FI_ENOMEM; - ipc_entry->cmd = *cmd; ipc_entry->ipc_entry = mr_entry; ipc_entry->bytes_done = 0; + ipc_entry->cmd = cmd; memcpy(ipc_entry->iov, iov, sizeof(*iov) * iov_count); ipc_entry->iov_count = iov_count; ipc_entry->rx_entry = rx_entry; if (rx_entry) { - ipc_entry->rx_entry->flags |= cmd->msg.hdr.op_flags; - ipc_entry->rx_entry->flags &= ~SMR_MULTI_RECV; + if (cmd->hdr.op_flags & SMR_REMOTE_CQ_DATA) + ipc_entry->rx_entry->flags |= FI_REMOTE_CQ_DATA; } ret = ofi_create_async_copy_event(iface, device, @@ -485,14 +500,15 @@ smr_ipc_async_copy(struct smr_ep *ep, void *ptr, if (ret < 0) goto fail; - if (cmd->msg.hdr.op == ofi_op_read_req) { - ret = ofi_async_copy_from_hmem_iov(ptr, cmd->msg.hdr.size, - iface, device, iov, iov_count, 0, - ipc_entry->async_event); + if (cmd->hdr.op == ofi_op_read_req) { + ret = ofi_async_copy_from_hmem_iov(ptr, cmd->hdr.size, + iface, device, iov, + iov_count, 0, + ipc_entry->async_event); } else { - ret = ofi_async_copy_to_hmem_iov(iface, device, iov, iov_count, 0, - ptr, cmd->msg.hdr.size, - ipc_entry->async_event); + ret = ofi_async_copy_to_hmem_iov(iface, device, iov, iov_count, + 0, ptr, cmd->hdr.size, + ipc_entry->async_event); } if (ret < 0) @@ -514,80 +530,77 @@ static struct smr_pend_entry *smr_progress_ipc(struct smr_cmd *cmd, size_t iov_count, size_t *total_len, struct smr_ep *ep, int *err) { - struct smr_region *peer_smr; - struct smr_resp *resp; void *ptr; int ret; ssize_t hmem_copy_ret; struct ofi_mr_entry *mr_entry; struct smr_domain *domain; + struct smr_av *av; struct smr_pend_entry *ipc_entry; domain = container_of(ep->util_ep.domain, struct smr_domain, util_domain); - peer_smr = smr_peer_region(ep->region, cmd->msg.hdr.id); - resp = smr_get_ptr(peer_smr, cmd->msg.hdr.src_data); - - if (cmd->msg.data.ipc_info.iface == FI_HMEM_ZE) - ze_set_pid_fd((void **) &cmd->msg.data.ipc_info.ipc_handle, - ep->region->map->peers[cmd->msg.hdr.id].pid_fd); + av = container_of(ep->util_ep.av, struct smr_av, util_av); + if (cmd->data.ipc_info.iface == FI_HMEM_ZE) + ze_set_pid_fd((void **) &cmd->data.ipc_info.ipc_handle, + av->smr_map.peers[cmd->hdr.id].pid_fd); //TODO disable IPC if more than 1 interface is initialized - ret = ofi_ipc_cache_search(domain->ipc_cache, cmd->msg.hdr.id, - &cmd->msg.data.ipc_info, &mr_entry); + ret = ofi_ipc_cache_search(domain->ipc_cache, cmd->hdr.id, + &cmd->data.ipc_info, &mr_entry); if (ret) goto out; ptr = (char *) (uintptr_t) mr_entry->info.mapped_addr + - (uintptr_t) 
cmd->msg.data.ipc_info.offset; + (uintptr_t) cmd->data.ipc_info.offset; - if (cmd->msg.data.ipc_info.iface == FI_HMEM_ROCR) { + if (cmd->data.ipc_info.iface == FI_HMEM_ROCR) { *total_len = 0; ipc_entry = NULL; - resp->status = SMR_STATUS_BUSY; ret = smr_ipc_async_copy(ep, (char*)ptr, rx_entry, iov, iov_count, mr_entry, cmd, &ipc_entry); - if (ret) - resp->status = -ret; + if (ret) { + cmd->hdr.status = ret; + goto uncache; + } return ipc_entry; } - if (cmd->msg.hdr.op == ofi_op_read_req) { - hmem_copy_ret = ofi_copy_from_hmem_iov(ptr, cmd->msg.hdr.size, - cmd->msg.data.ipc_info.iface, - cmd->msg.data.ipc_info.device, iov, + if (cmd->hdr.op == ofi_op_read_req) { + hmem_copy_ret = ofi_copy_from_hmem_iov(ptr, cmd->hdr.size, + cmd->data.ipc_info.iface, + cmd->data.ipc_info.device, iov, iov_count, 0); } else { - hmem_copy_ret = ofi_copy_to_hmem_iov(cmd->msg.data.ipc_info.iface, - cmd->msg.data.ipc_info.device, iov, - iov_count, 0, ptr, cmd->msg.hdr.size); + hmem_copy_ret = ofi_copy_to_hmem_iov(cmd->data.ipc_info.iface, + cmd->data.ipc_info.device, iov, + iov_count, 0, ptr, cmd->hdr.size); } - ofi_mr_cache_delete(domain->ipc_cache, mr_entry); - if (hmem_copy_ret < 0) *err = hmem_copy_ret; - else if (hmem_copy_ret != cmd->msg.hdr.size) + else if (hmem_copy_ret != cmd->hdr.size) *err = -FI_ETRUNC; else *err = FI_SUCCESS; *total_len = hmem_copy_ret; +uncache: + ofi_mr_cache_delete(domain->ipc_cache, mr_entry); out: - //Status must be set last (signals peer: op done, valid resp entry) - resp->status = -ret; - + cmd->hdr.status = ret; + smr_return_cmd(ep, cmd); return NULL; } -static void smr_do_atomic(void *src, struct ofi_mr *dst_mr, void *dst, - void *cmp, enum fi_datatype datatype, enum fi_op op, - size_t cnt, uint16_t flags) +static void smr_do_atomic(struct smr_cmd *cmd, void *src, struct ofi_mr *dst_mr, + void *dst, void *cmp, enum fi_datatype datatype, + enum fi_op op, size_t cnt, uint16_t flags) { char tmp_result[SMR_INJECT_SIZE]; char tmp_dst[SMR_INJECT_SIZE]; @@ -608,9 +621,13 @@ static void smr_do_atomic(void *src, struct ofi_mr *dst_mr, void *dst, if (ofi_atomic_isswap_op(op)) { ofi_atomic_swap_handler(op, datatype, cpy_dst, src, cmp, tmp_result, cnt); - } else if (flags & SMR_RMA_REQ && ofi_atomic_isreadwrite_op(op)) { + memcpy(src, tmp_result, cnt * ofi_datatype_size(datatype)); + } else if (cmd->hdr.op == ofi_op_atomic_fetch || + ofi_atomic_isreadwrite_op(op)) { ofi_atomic_readwrite_handler(op, datatype, cpy_dst, src, tmp_result, cnt); + memcpy(src, op == FI_ATOMIC_READ ? cpy_dst : tmp_result, + cnt * ofi_datatype_size(datatype)); } else if (ofi_atomic_iswrite_op(op)) { ofi_atomic_write_handler(op, datatype, cpy_dst, src, cnt); } else { @@ -618,10 +635,6 @@ static void smr_do_atomic(void *src, struct ofi_mr *dst_mr, void *dst, "invalid atomic operation\n"); } - if (flags & SMR_RMA_REQ) - memcpy(src, op == FI_ATOMIC_READ ? 
cpy_dst : tmp_result, - cnt * ofi_datatype_size(datatype)); - if (cpy_dst != dst) { ret = ofi_copy_to_hmem(dst_mr->iface, dst_mr->device, dst, cpy_dst, cnt * ofi_datatype_size(datatype)); @@ -635,18 +648,18 @@ static int smr_progress_inline_atomic(struct smr_cmd *cmd, struct ofi_mr **mr, struct fi_ioc *ioc, size_t ioc_count, size_t *len) { int i; - uint8_t *src = cmd->msg.data.msg; + uint8_t *src = cmd->data.msg; - assert(cmd->msg.hdr.op == ofi_op_atomic); + assert(cmd->hdr.op == ofi_op_atomic); - for (i = *len = 0; i < ioc_count && *len < cmd->msg.hdr.size; i++) { - smr_do_atomic(&src[*len], mr[i], ioc[i].addr, NULL, - cmd->msg.hdr.datatype, cmd->msg.hdr.atomic_op, - ioc[i].count, cmd->msg.hdr.op_flags); - *len += ioc[i].count * ofi_datatype_size(cmd->msg.hdr.datatype); + for (i = *len = 0; i < ioc_count && *len < cmd->hdr.size; i++) { + smr_do_atomic(cmd, &src[*len], mr[i], ioc[i].addr, NULL, + cmd->hdr.datatype, cmd->hdr.atomic_op, + ioc[i].count, cmd->hdr.op_flags); + *len += ioc[i].count * ofi_datatype_size(cmd->hdr.datatype); } - if (*len != cmd->msg.hdr.size) { + if (*len != cmd->hdr.size) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "recv truncated"); return -FI_ETRUNC; @@ -663,12 +676,12 @@ static int smr_progress_inject_atomic(struct smr_cmd *cmd, struct ofi_mr **mr, uint8_t *src, *comp; int i; - inj_offset = (size_t) cmd->msg.hdr.src_data; + inj_offset = (size_t) cmd->hdr.proto_data; tx_buf = smr_get_ptr(ep->region, inj_offset); if (err) goto out; - switch (cmd->msg.hdr.op) { + switch (cmd->hdr.op) { case ofi_op_atomic_compare: src = tx_buf->buf; comp = tx_buf->comp; @@ -679,24 +692,21 @@ static int smr_progress_inject_atomic(struct smr_cmd *cmd, struct ofi_mr **mr, break; } - for (i = *len = 0; i < ioc_count && *len < cmd->msg.hdr.size; i++) { - smr_do_atomic(&src[*len], mr[i], ioc[i].addr, - comp ? &comp[*len] : NULL, cmd->msg.hdr.datatype, - cmd->msg.hdr.atomic_op, ioc[i].count, - cmd->msg.hdr.op_flags); - *len += ioc[i].count * ofi_datatype_size(cmd->msg.hdr.datatype); + for (i = *len = 0; i < ioc_count && *len < cmd->hdr.size; i++) { + smr_do_atomic(cmd, &src[*len], mr[i], ioc[i].addr, + comp ? 
&comp[*len] : NULL, cmd->hdr.datatype, + cmd->hdr.atomic_op, ioc[i].count, + cmd->hdr.op_flags); + *len += ioc[i].count * ofi_datatype_size(cmd->hdr.datatype); } - if (*len != cmd->msg.hdr.size) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "recv truncated"); + if (*len != cmd->hdr.size) { + FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "recv truncated"); err = -FI_ETRUNC; } out: - if (!(cmd->msg.hdr.op_flags & SMR_RMA_REQ)) - smr_release_txbuf(ep->region, tx_buf); - + smr_return_cmd(ep, cmd); return err; } @@ -710,34 +720,29 @@ static int smr_start_common(struct smr_ep *ep, struct smr_cmd *cmd, int ret; int err = 0; - switch (cmd->msg.hdr.op_src) { - case smr_src_inline: + switch (cmd->hdr.proto) { + case smr_proto_inline: err = smr_progress_inline(cmd, (struct ofi_mr **) rx_entry->desc, rx_entry->iov, rx_entry->count, &total_len); break; - case smr_src_inject: + case smr_proto_inject: err = smr_progress_inject(cmd, (struct ofi_mr **) rx_entry->desc, rx_entry->iov, rx_entry->count, &total_len, ep, 0); break; - case smr_src_iov: + case smr_proto_iov: err = smr_progress_iov(cmd, rx_entry->iov, rx_entry->count, &total_len, ep); break; - case smr_src_mmap: - err = smr_progress_mmap(cmd, (struct ofi_mr **) rx_entry->desc, - rx_entry->iov, rx_entry->count, - &total_len, ep); - break; - case smr_src_sar: + case smr_proto_sar: pend = smr_progress_sar(cmd, rx_entry, (struct ofi_mr **) rx_entry->desc, rx_entry->iov, rx_entry->count, &total_len, ep); break; - case smr_src_ipc: + case smr_proto_ipc: pend = smr_progress_ipc(cmd, rx_entry, (struct ofi_mr **) rx_entry->desc, rx_entry->iov, rx_entry->count, @@ -752,7 +757,7 @@ static int smr_start_common(struct smr_ep *ep, struct smr_cmd *cmd, if (!pend) { comp_buf = rx_entry->iov[0].iov_base; comp_flags = smr_rx_cq_flags(rx_entry->flags, - cmd->msg.hdr.op_flags); + cmd->hdr.op_flags); if (err) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "error processing op\n"); @@ -761,10 +766,10 @@ static int smr_start_common(struct smr_ep *ep, struct smr_cmd *cmd, comp_flags, rx_entry->tag, -err); } else { - ret = smr_complete_rx(ep, rx_entry->context, cmd->msg.hdr.op, - comp_flags, total_len, comp_buf, - cmd->msg.hdr.id, cmd->msg.hdr.tag, - cmd->msg.hdr.data); + ret = smr_complete_rx(ep, rx_entry->context, + cmd->hdr.op, comp_flags, + total_len, comp_buf, cmd->hdr.id, + cmd->hdr.tag, cmd->hdr.cq_data); } if (ret) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, @@ -777,49 +782,50 @@ static int smr_start_common(struct smr_ep *ep, struct smr_cmd *cmd, } static int smr_copy_saved(struct smr_cmd_ctx *cmd_ctx, - struct fi_peer_rx_entry *rx_entry) + struct fi_peer_rx_entry *rx_entry) { struct smr_unexp_buf *sar_buf; + struct smr_pend_entry *sar_entry; size_t bytes = 0; uint64_t comp_flags; int ret; + sar_entry = (struct smr_pend_entry *) cmd_ctx->cmd->hdr.rx_ctx; while (!slist_empty(&cmd_ctx->buf_list)) { slist_remove_head_container(&cmd_ctx->buf_list, - struct smr_unexp_buf, sar_buf, entry); + struct smr_unexp_buf, sar_buf, + entry); bytes += ofi_copy_to_mr_iov((struct ofi_mr **) rx_entry->desc, - rx_entry->iov, rx_entry->count, bytes, - sar_buf->buf, - MIN(cmd_ctx->cmd.msg.hdr.size - bytes, - SMR_SAR_SIZE)); + rx_entry->iov, rx_entry->count, + bytes, sar_buf->buf, + MIN(cmd_ctx->cmd->hdr.size - bytes, + SMR_SAR_SIZE)); ofi_buf_free(sar_buf); } - if (bytes != cmd_ctx->cmd.msg.hdr.size) { - assert(cmd_ctx->sar_entry); - cmd_ctx->sar_entry->cmd_ctx = NULL; - cmd_ctx->sar_entry->rx_entry = rx_entry; - memcpy(cmd_ctx->sar_entry->iov, rx_entry->iov, + if (bytes != cmd_ctx->cmd->hdr.size) { + sar_entry->cmd_ctx 
= NULL; + sar_entry->rx_entry = rx_entry; + memcpy(sar_entry->iov, rx_entry->iov, sizeof(*rx_entry->iov) * rx_entry->count); - cmd_ctx->sar_entry->iov_count = rx_entry->count; - (void) ofi_truncate_iov(cmd_ctx->sar_entry->iov, - &cmd_ctx->sar_entry->iov_count, - cmd_ctx->cmd.msg.hdr.size); - memcpy(cmd_ctx->sar_entry->mr, rx_entry->desc, - sizeof(*rx_entry->desc) * cmd_ctx->sar_entry->iov_count); + sar_entry->iov_count = rx_entry->count; + (void) ofi_truncate_iov(sar_entry->iov, + &sar_entry->iov_count, + cmd_ctx->cmd->hdr.size); + memcpy(sar_entry->mr, rx_entry->desc, + sizeof(*rx_entry->desc) * sar_entry->iov_count); return FI_SUCCESS; } - assert(!cmd_ctx->sar_entry); comp_flags = smr_rx_cq_flags(rx_entry->flags, - cmd_ctx->cmd.msg.hdr.op_flags); + cmd_ctx->cmd->hdr.op_flags); ret = smr_complete_rx(cmd_ctx->ep, rx_entry->context, - cmd_ctx->cmd.msg.hdr.op, comp_flags, + cmd_ctx->cmd->hdr.op, comp_flags, bytes, rx_entry->iov[0].iov_base, - cmd_ctx->cmd.msg.hdr.id, - cmd_ctx->cmd.msg.hdr.tag, - cmd_ctx->cmd.msg.hdr.data); + cmd_ctx->cmd->hdr.id, + cmd_ctx->cmd->hdr.tag, + cmd_ctx->cmd->hdr.cq_data); if (ret) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "unable to process rx completion\n"); @@ -833,13 +839,23 @@ static int smr_copy_saved(struct smr_cmd_ctx *cmd_ctx, int smr_unexp_start(struct fi_peer_rx_entry *rx_entry) { struct smr_cmd_ctx *cmd_ctx = rx_entry->peer_context; - int ret; + int ret = FI_SUCCESS; - if (cmd_ctx->cmd.msg.hdr.op_src == smr_src_sar || - cmd_ctx->cmd.msg.hdr.op_src == smr_src_inject) + switch (cmd_ctx->cmd->hdr.proto) { + case smr_proto_sar: ret = smr_copy_saved(cmd_ctx, rx_entry); - else - ret = smr_start_common(cmd_ctx->ep, &cmd_ctx->cmd, rx_entry); + break; + case smr_proto_inline: + case smr_proto_inject: + case smr_proto_iov: + case smr_proto_ipc: + ret = smr_start_common(cmd_ctx->ep, cmd_ctx->cmd, rx_entry); + break; + default: + FI_WARN(&smr_prov, FI_LOG_EP_CTRL, + "unidentified operation type\n"); + ret = -FI_EINVAL; + } ofi_buf_free(cmd_ctx); @@ -848,65 +864,60 @@ int smr_unexp_start(struct fi_peer_rx_entry *rx_entry) static void smr_progress_connreq(struct smr_ep *ep, struct smr_cmd *cmd) { + struct smr_av *av; struct smr_region *peer_smr; - struct smr_inject_buf *tx_buf; - size_t inj_offset; int64_t idx = -1; int ret = 0; - inj_offset = (size_t) cmd->msg.hdr.src_data; - tx_buf = smr_get_ptr(ep->region, inj_offset); - - ret = smr_map_add(&smr_prov, ep->region->map, - (char *) tx_buf->data, &idx); + av = container_of(ep->util_ep.av, struct smr_av, util_av); + ret = smr_map_add(&smr_prov, &av->smr_map, (char *) cmd->data.msg, + &idx); if (ret || idx < 0) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "Error processing mapping request\n"); return; } - peer_smr = smr_peer_region(ep->region, idx); + peer_smr = smr_peer_region(ep, idx); if (!peer_smr) { - ofi_spin_lock(&ep->region->map->lock); - ret = smr_map_to_region(&smr_prov, ep->region->map, idx); - ofi_spin_unlock(&ep->region->map->lock); + ofi_spin_lock(&av->smr_map.lock); + ret = smr_map_to_region(&smr_prov, &av->smr_map, idx); + ofi_spin_unlock(&av->smr_map.lock); if (ret) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "Could not map peer region\n"); return; } - peer_smr = smr_peer_region(ep->region, idx); + peer_smr = smr_peer_region(ep, idx); } assert(peer_smr); - if (peer_smr->pid != (int) cmd->msg.hdr.data) { + if (peer_smr->pid != (int) cmd->hdr.cq_data) { /* TODO track and update/complete in error any transfers * to or from old mapping */ - ofi_spin_lock(&ep->region->map->lock); - smr_unmap_region(&smr_prov, ep->region->map, 
idx, false); - smr_map_to_region(&smr_prov, ep->region->map, idx); - ofi_spin_unlock(&ep->region->map->lock); - peer_smr = smr_peer_region(ep->region, idx); + ofi_spin_lock(&av->smr_map.lock); + smr_unmap_region(&smr_prov, &av->smr_map, idx, false); + smr_map_to_region(&smr_prov, &av->smr_map, idx); + ofi_spin_unlock(&av->smr_map.lock); + peer_smr = smr_peer_region(ep, idx); } - smr_set_ipc_valid(ep->region, idx); - smr_peer_data(peer_smr)[cmd->msg.hdr.id].addr.id = idx; - smr_peer_data(ep->region)[idx].addr.id = cmd->msg.hdr.id; + smr_set_ipc_valid(ep, idx); + smr_peer_data(peer_smr)[cmd->hdr.id].id = idx; + smr_peer_data(ep->region)[idx].id = cmd->hdr.id; - smr_release_txbuf(ep->region, tx_buf); - assert(ep->region->map->num_peers > 0); - ep->region->max_sar_buf_per_peer = SMR_MAX_PEERS / - ep->region->map->num_peers; + assert(av->smr_map.num_peers > 0); + ep->region->max_sar_buf_per_peer = MIN(SMR_BUF_BATCH_MAX, + SMR_MAX_PEERS / av->smr_map.num_peers); } static int smr_alloc_cmd_ctx(struct smr_ep *ep, - struct fi_peer_rx_entry *rx_entry, struct smr_cmd *cmd) + struct fi_peer_rx_entry *rx_entry, + struct smr_cmd *cmd) { struct smr_cmd_ctx *cmd_ctx; struct smr_pend_entry *sar_entry; - struct smr_inject_buf *tx_buf; - struct smr_unexp_buf *buf; cmd_ctx = ofi_buf_alloc(ep->cmd_ctx_pool); if (!cmd_ctx) { @@ -915,35 +926,29 @@ static int smr_alloc_cmd_ctx(struct smr_ep *ep, return -FI_ENOMEM; } cmd_ctx->ep = ep; + cmd_ctx->cmd = cmd; - rx_entry->msg_size = cmd->msg.hdr.size; - if (cmd->msg.hdr.op_flags & SMR_REMOTE_CQ_DATA) { + rx_entry->msg_size = cmd->hdr.size; + if (cmd->hdr.op_flags & SMR_REMOTE_CQ_DATA) { rx_entry->flags |= FI_REMOTE_CQ_DATA; - rx_entry->cq_data = cmd->msg.hdr.data; + rx_entry->cq_data = cmd->hdr.cq_data; } - if (cmd->msg.hdr.op_src == smr_src_inline) { - memcpy(&cmd_ctx->cmd, cmd, sizeof(cmd->msg.hdr) + cmd->msg.hdr.size); - } else if (cmd->msg.hdr.op_src == smr_src_inject) { - memcpy(&cmd_ctx->cmd, cmd, sizeof(cmd->msg.hdr)); - buf = ofi_buf_alloc(ep->unexp_buf_pool); - if (!buf) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "Error allocating buffer\n"); - ofi_buf_free(cmd_ctx); - return -FI_ENOMEM; - } - cmd_ctx->sar_entry = NULL; - slist_init(&cmd_ctx->buf_list); - slist_insert_tail(&buf->entry, &cmd_ctx->buf_list); - tx_buf = smr_get_ptr(ep->region, (size_t) cmd->msg.hdr.src_data); - memcpy(buf->buf, tx_buf->buf, cmd->msg.hdr.size); - smr_release_txbuf(ep->region, tx_buf); - } else if (cmd->msg.hdr.op_src == smr_src_sar) { - memcpy(&cmd_ctx->cmd, cmd, sizeof(*cmd)); - slist_init(&cmd_ctx->buf_list); - - if (cmd->msg.hdr.size) { + switch(cmd->hdr.proto) { + case smr_proto_inline: + cmd_ctx->cmd = &cmd_ctx->cmd_cpy; + memcpy(&cmd_ctx->cmd_cpy, cmd, + sizeof(cmd->hdr) + cmd->hdr.size); + goto out; + case smr_proto_inject: + case smr_proto_ipc: + case smr_proto_iov: + cmd_ctx->cmd = cmd; + goto out; + case smr_proto_sar: + cmd_ctx->cmd = &cmd_ctx->cmd_cpy; + + if (cmd->hdr.size) { sar_entry = ofi_buf_alloc(ep->pend_buf_pool); if (!sar_entry) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, @@ -951,20 +956,26 @@ static int smr_alloc_cmd_ctx(struct smr_ep *ep, ofi_buf_free(cmd_ctx); return -FI_ENOMEM; } + cmd->hdr.rx_ctx = (uintptr_t) sar_entry; + memcpy(&cmd_ctx->cmd_cpy, cmd, sizeof(cmd->hdr)); - memcpy(&sar_entry->cmd, cmd, sizeof(*cmd)); + slist_init(&cmd_ctx->buf_list); sar_entry->cmd_ctx = cmd_ctx; sar_entry->bytes_done = 0; sar_entry->rx_entry = rx_entry; - dlist_insert_tail(&sar_entry->entry, &ep->sar_list); - - cmd_ctx->sar_entry = sar_entry; + smr_buffer_sar(ep, sar_entry, 
cmd); } - } else { - memcpy(&cmd_ctx->cmd, cmd, sizeof(*cmd)); + smr_return_cmd(ep, cmd); + break; + default: + FI_WARN(&smr_prov, FI_LOG_EP_CTRL, + "unidentified operation type\n"); + ofi_buf_free(cmd_ctx); + return -FI_EINVAL; } +out: rx_entry->peer_context = cmd_ctx; return FI_SUCCESS; } @@ -973,12 +984,18 @@ static int smr_progress_cmd_msg(struct smr_ep *ep, struct smr_cmd *cmd) { struct fi_peer_match_attr attr; struct fi_peer_rx_entry *rx_entry; + struct smr_av *av; int ret; - attr.addr = ep->region->map->peers[cmd->msg.hdr.id].fiaddr; - attr.msg_size = cmd->msg.hdr.size; - attr.tag = cmd->msg.hdr.tag; - if (cmd->msg.hdr.op == ofi_op_tagged) { + if (cmd->hdr.rx_ctx) + return smr_progress_pending(ep, cmd); + + av = container_of(ep->util_ep.av, struct smr_av, util_av); + + attr.addr = av->smr_map.peers[cmd->hdr.id].fiaddr; + attr.msg_size = cmd->hdr.size; + attr.tag = cmd->hdr.tag; + if (cmd->hdr.op == ofi_op_tagged) { ret = ep->srx->owner_ops->get_tag(ep->srx, &attr, &rx_entry); if (ret == -FI_ENOENT) { ret = smr_alloc_cmd_ctx(ep, rx_entry, cmd); @@ -1021,66 +1038,59 @@ static int smr_progress_cmd_msg(struct smr_ep *ep, struct smr_cmd *cmd) return ret < 0 ? ret : 0; } -static int smr_progress_cmd_rma(struct smr_ep *ep, struct smr_cmd *cmd, - struct smr_cmd *rma_cmd) +static int smr_progress_cmd_rma(struct smr_ep *ep, struct smr_cmd *cmd) { - struct smr_region *peer_smr; struct smr_domain *domain; - struct smr_resp *resp; struct iovec iov[SMR_IOV_LIMIT]; - size_t iov_count; - size_t total_len = 0; + struct fi_rma_iov *rma_iov; + size_t iov_count, total_len = 0; int err = 0, ret = 0; struct ofi_mr *mr[SMR_IOV_LIMIT]; + if (cmd->hdr.rx_ctx) + return smr_progress_pending(ep, cmd); + domain = container_of(ep->util_ep.domain, struct smr_domain, util_domain); ofi_genlock_lock(&domain->util_domain.lock); - for (iov_count = 0; iov_count < rma_cmd->rma.rma_count; iov_count++) { + for (iov_count = 0; iov_count < cmd->rma.rma_count; iov_count++) { + rma_iov = &cmd->rma.rma_iov[iov_count]; ret = ofi_mr_map_verify(&domain->util_domain.mr_map, - (uintptr_t *) &(rma_cmd->rma.rma_iov[iov_count].addr), - rma_cmd->rma.rma_iov[iov_count].len, - rma_cmd->rma.rma_iov[iov_count].key, - ofi_rx_mr_reg_flags(cmd->msg.hdr.op, 0), - (void **) &mr[iov_count]); + (uintptr_t *) &(rma_iov->addr), + rma_iov->len, rma_iov->key, + ofi_rx_mr_reg_flags(cmd->hdr.op, 0), + (void **) &mr[iov_count]); if (ret) break; - iov[iov_count].iov_base = (void *) rma_cmd->rma.rma_iov[iov_count].addr; - iov[iov_count].iov_len = rma_cmd->rma.rma_iov[iov_count].len; + iov[iov_count].iov_base = (void *) rma_iov->addr; + iov[iov_count].iov_len = rma_iov->len; } ofi_genlock_unlock(&domain->util_domain.lock); if (ret) goto out; - switch (cmd->msg.hdr.op_src) { - case smr_src_inline: + switch (cmd->hdr.proto) { + case smr_proto_inline: err = smr_progress_inline(cmd, mr, iov, iov_count, &total_len); break; - case smr_src_inject: + case smr_proto_inject: err = smr_progress_inject(cmd, mr, iov, iov_count, &total_len, ep, ret); - if (cmd->msg.hdr.op == ofi_op_read_req && cmd->msg.hdr.data) { - peer_smr = smr_peer_region(ep->region, cmd->msg.hdr.id); - resp = smr_get_ptr(peer_smr, cmd->msg.hdr.data); - resp->status = -err; - } + if (cmd->hdr.op == ofi_op_read_req && cmd->hdr.cq_data) + cmd->hdr.status = err; break; - case smr_src_iov: + case smr_proto_iov: err = smr_progress_iov(cmd, iov, iov_count, &total_len, ep); break; - case smr_src_mmap: - err = smr_progress_mmap(cmd, mr, iov, iov_count, &total_len, - ep); - break; - case smr_src_sar: + case 
smr_proto_sar: if (smr_progress_sar(cmd, NULL, mr, iov, iov_count, &total_len, ep)) return ret; break; - case smr_src_ipc: + case smr_proto_ipc: if (smr_progress_ipc(cmd, NULL, mr, iov, iov_count, &total_len, ep, &ret)) return ret; @@ -1095,66 +1105,65 @@ static int smr_progress_cmd_rma(struct smr_ep *ep, struct smr_cmd *cmd, FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "error processing rma op\n"); ret = smr_write_err_comp(ep->util_ep.rx_cq, NULL, - smr_rx_cq_flags(0, cmd->msg.hdr.op_flags), - 0, -err); + smr_rx_cq_flags(0, cmd->hdr.op_flags), + 0, -err); } else { - ret = smr_complete_rx(ep, (void *) cmd->msg.hdr.msg_id, - cmd->msg.hdr.op, smr_rx_cq_flags(0, - cmd->msg.hdr.op_flags), total_len, - iov_count ? iov[0].iov_base : NULL, - cmd->msg.hdr.id, 0, cmd->msg.hdr.data); + ret = smr_complete_rx(ep, (void *) cmd->hdr.tx_ctx, + cmd->hdr.op, + smr_rx_cq_flags(0, cmd->hdr.op_flags), + total_len, + iov_count ? iov[0].iov_base : NULL, + cmd->hdr.id, 0, cmd->hdr.cq_data); } if (ret) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "unable to process rx completion\n"); + "unable to process rx completion\n"); } out: return ret; } -static int smr_progress_cmd_atomic(struct smr_ep *ep, struct smr_cmd *cmd, - struct smr_cmd *rma_cmd) +static int smr_progress_cmd_atomic(struct smr_ep *ep, struct smr_cmd *cmd) { - struct smr_region *peer_smr; struct smr_domain *domain; - struct smr_resp *resp; struct ofi_mr *mr[SMR_IOV_LIMIT]; struct fi_ioc ioc[SMR_IOV_LIMIT]; - size_t ioc_count; - size_t total_len = 0; + struct fi_rma_ioc *rma_ioc; + size_t ioc_count, dt_size, total_len = 0; int err = 0, ret = 0; domain = container_of(ep->util_ep.domain, struct smr_domain, util_domain); + dt_size = ofi_datatype_size(cmd->hdr.datatype); ofi_genlock_lock(&domain->util_domain.lock); - for (ioc_count = 0; ioc_count < rma_cmd->rma.rma_count; ioc_count++) { + for (ioc_count = 0; ioc_count < cmd->rma.rma_count; ioc_count++) { + rma_ioc = &cmd->rma.rma_ioc[ioc_count]; ret = ofi_mr_map_verify(&domain->util_domain.mr_map, - (uintptr_t *) &(rma_cmd->rma.rma_ioc[ioc_count].addr), - rma_cmd->rma.rma_ioc[ioc_count].count * - ofi_datatype_size(cmd->msg.hdr.datatype), - rma_cmd->rma.rma_ioc[ioc_count].key, - ofi_rx_mr_reg_flags(cmd->msg.hdr.op, - cmd->msg.hdr.atomic_op), - (void **) &mr[ioc_count]); + (uintptr_t *) &(rma_ioc->addr), + rma_ioc->count * dt_size, + rma_ioc->key, + ofi_rx_mr_reg_flags(cmd->hdr.op, + cmd->hdr.atomic_op), + (void **) &mr[ioc_count]); if (ret) break; - ioc[ioc_count].addr = (void *) rma_cmd->rma.rma_ioc[ioc_count].addr; - ioc[ioc_count].count = rma_cmd->rma.rma_ioc[ioc_count].count; + ioc[ioc_count].addr = (void *) rma_ioc->addr; + ioc[ioc_count].count = rma_ioc->count; } ofi_genlock_unlock(&domain->util_domain.lock); if (ret) goto out; - switch (cmd->msg.hdr.op_src) { - case smr_src_inline: + switch (cmd->hdr.proto) { + case smr_proto_inline: err = smr_progress_inline_atomic(cmd, mr, ioc, ioc_count, &total_len); break; - case smr_src_inject: + case smr_proto_inject: err = smr_progress_inject_atomic(cmd, mr, ioc, ioc_count, &total_len, ep, ret); break; @@ -1163,30 +1172,20 @@ static int smr_progress_cmd_atomic(struct smr_ep *ep, struct smr_cmd *cmd, "unidentified operation type\n"); err = -FI_EINVAL; } - if (cmd->msg.hdr.data) { - peer_smr = smr_peer_region(ep->region, cmd->msg.hdr.id); - resp = smr_get_ptr(peer_smr, cmd->msg.hdr.data); - /* - * smr_do_atomic will do memcpy when flags has SMR_RMA_REQ. - * Add a memory barrier before updating resp status to ensure - * the buffer is ready before the status update. 
- */ - if (cmd->msg.hdr.op_flags & SMR_RMA_REQ) - ofi_wmb(); - resp->status = -err; - } + cmd->hdr.status = -err; if (err) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "error processing atomic op\n"); ret = smr_write_err_comp(ep->util_ep.rx_cq, NULL, - smr_rx_cq_flags(0, cmd->msg.hdr.op_flags), - 0, err); + smr_rx_cq_flags(0, + cmd->hdr.op_flags), 0, err); } else { - ret = smr_complete_rx(ep, NULL, cmd->msg.hdr.op, - smr_rx_cq_flags(0, cmd->msg.hdr.op_flags), - total_len, ioc_count ? ioc[0].addr : NULL, - cmd->msg.hdr.id, 0, cmd->msg.hdr.data); + ret = smr_complete_rx(ep, NULL, cmd->hdr.op, + smr_rx_cq_flags(0, + cmd->hdr.op_flags), total_len, + ioc_count ? ioc[0].addr : NULL, + cmd->hdr.id, 0, cmd->hdr.cq_data); } if (ret) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, @@ -1201,6 +1200,7 @@ static int smr_progress_cmd_atomic(struct smr_ep *ep, struct smr_cmd *cmd, static void smr_progress_cmd(struct smr_ep *ep) { struct smr_cmd_entry *ce; + struct smr_cmd *cmd; int ret = 0; int64_t pos; @@ -1221,29 +1221,28 @@ static void smr_progress_cmd(struct smr_ep *ep) ret = smr_cmd_queue_head(smr_cmd_queue(ep->region), &ce, &pos); if (ret == -FI_ENOENT) break; - switch (ce->cmd.msg.hdr.op) { + + cmd = (struct smr_cmd *) ce->ptr; + switch (cmd->hdr.op) { case ofi_op_msg: case ofi_op_tagged: - ret = smr_progress_cmd_msg(ep, &ce->cmd); + ret = smr_progress_cmd_msg(ep, cmd); break; case ofi_op_write: case ofi_op_read_req: - ret = smr_progress_cmd_rma(ep, &ce->cmd, - &ce->rma_cmd); + ret = smr_progress_cmd_rma(ep, cmd); break; case ofi_op_write_async: case ofi_op_read_async: - ofi_ep_peer_rx_cntr_inc(&ep->util_ep, - ce->cmd.msg.hdr.op); + ofi_ep_peer_rx_cntr_inc(&ep->util_ep, cmd->hdr.op); break; case ofi_op_atomic: case ofi_op_atomic_fetch: case ofi_op_atomic_compare: - ret = smr_progress_cmd_atomic(ep, &ce->cmd, - &ce->rma_cmd); + ret = smr_progress_cmd_atomic(ep, cmd); break; case SMR_OP_MAX + ofi_ctrl_connreq: - smr_progress_connreq(ep, &ce->cmd); + smr_progress_connreq(ep, cmd); break; default: FI_WARN(&smr_prov, FI_LOG_EP_CTRL, @@ -1265,11 +1264,9 @@ static void smr_progress_cmd(struct smr_ep *ep) void smr_progress_ipc_list(struct smr_ep *ep) { struct smr_pend_entry *ipc_entry; - struct smr_region *peer_smr; struct smr_domain *domain; enum fi_hmem_iface iface; struct dlist_entry *tmp; - struct smr_resp *resp; uint64_t device; uint64_t flags; void *context; @@ -1282,10 +1279,8 @@ void smr_progress_ipc_list(struct smr_ep *ep) dlist_foreach_container_safe(&ep->ipc_cpy_pend_list, struct smr_pend_entry, ipc_entry, entry, tmp) { - iface = ipc_entry->cmd.msg.data.ipc_info.iface; - device = ipc_entry->cmd.msg.data.ipc_info.device; - peer_smr = smr_peer_region(ep->region, ipc_entry->cmd.msg.hdr.id); - resp = smr_get_ptr(peer_smr, ipc_entry->cmd.msg.hdr.src_data); + iface = ipc_entry->cmd->data.ipc_info.iface; + device = ipc_entry->cmd->data.ipc_info.device; if (ofi_async_copy_query(iface, ipc_entry->async_event)) continue; @@ -1293,18 +1288,19 @@ void smr_progress_ipc_list(struct smr_ep *ep) if (ipc_entry->rx_entry) { context = ipc_entry->rx_entry->context; flags = smr_rx_cq_flags(ipc_entry->rx_entry->flags, - ipc_entry->cmd.msg.hdr.op_flags); + ipc_entry->cmd->hdr.op_flags); } else { context = NULL; - flags = smr_rx_cq_flags(0, ipc_entry->cmd.msg.hdr.op_flags); + flags = smr_rx_cq_flags(0, + ipc_entry->cmd->hdr.op_flags); } - ret = smr_complete_rx(ep, context, ipc_entry->cmd.msg.hdr.op, - flags, ipc_entry->cmd.msg.hdr.size, - ipc_entry->iov[0].iov_base, - ipc_entry->cmd.msg.hdr.id, - ipc_entry->cmd.msg.hdr.tag, - 
ipc_entry->cmd.msg.hdr.data); + ret = smr_complete_rx(ep, context, ipc_entry->cmd->hdr.op, + flags, ipc_entry->cmd->hdr.size, + ipc_entry->iov[0].iov_base, + ipc_entry->cmd->hdr.id, + ipc_entry->cmd->hdr.tag, + ipc_entry->cmd->hdr.cq_data); if (ret) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "unable to process rx completion\n"); @@ -1315,7 +1311,6 @@ void smr_progress_ipc_list(struct smr_ep *ep) * ensure that the tx_complete occurs after the sending * buffer is now free to be reused */ - resp->status = SMR_STATUS_SUCCESS; ofi_mr_cache_delete(domain->ipc_cache, ipc_entry->ipc_entry); ofi_free_async_copy_event(iface, device, @@ -1324,117 +1319,9 @@ void smr_progress_ipc_list(struct smr_ep *ep) if (ipc_entry->rx_entry) ep->srx->owner_ops->free_entry(ipc_entry->rx_entry); ofi_buf_free(ipc_entry); - } -} -static void smr_buffer_sar(struct smr_ep *ep, struct smr_region *peer_smr, - struct smr_resp *resp, struct smr_pend_entry *sar_entry) -{ - struct smr_sar_buf *sar_buf; - struct smr_unexp_buf *buf; - size_t bytes; - int next_buf = 0; - - while (next_buf < sar_entry->cmd.msg.data.buf_batch_size && - sar_entry->bytes_done < sar_entry->cmd.msg.hdr.size) { - buf = ofi_buf_alloc(ep->unexp_buf_pool); - if (!buf) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "Error allocating buffer for unexpected SAR " - "(-FI_ENOMEM)\n"); - return; - } - slist_insert_tail(&buf->entry, - &sar_entry->cmd_ctx->buf_list); - - sar_buf = smr_freestack_get_entry_from_index( - smr_sar_pool(ep->region), - sar_entry->cmd.msg.data.sar[next_buf]); - bytes = MIN(sar_entry->cmd.msg.hdr.size - - sar_entry->bytes_done, - SMR_SAR_SIZE); - - memcpy(buf->buf, sar_buf->buf, bytes); - - sar_entry->bytes_done += bytes; - next_buf++; + smr_return_cmd(ep, ipc_entry->cmd); } - ofi_wmb(); - resp->status = SMR_STATUS_SAR_EMPTY; -} - -static void smr_progress_sar_list(struct smr_ep *ep) -{ - struct smr_region *peer_smr; - struct smr_pend_entry *sar_entry; - struct smr_resp *resp; - struct dlist_entry *tmp; - void *comp_ctx; - uint64_t comp_flags; - int ret; - - ofi_genlock_lock(&ep->util_ep.lock); - dlist_foreach_container_safe(&ep->sar_list, struct smr_pend_entry, - sar_entry, entry, tmp) { - peer_smr = smr_peer_region(ep->region, sar_entry->cmd.msg.hdr.id); - resp = smr_get_ptr(peer_smr, sar_entry->cmd.msg.hdr.src_data); - if (sar_entry->cmd.msg.hdr.op == ofi_op_read_req) { - smr_try_progress_to_sar(ep, peer_smr, smr_sar_pool(ep->region), - resp, &sar_entry->cmd, sar_entry->mr, - sar_entry->iov, sar_entry->iov_count, - &sar_entry->bytes_done, sar_entry); - } else { - if (sar_entry->cmd_ctx) { - if (resp->status != SMR_STATUS_SAR_FULL) - continue; - smr_buffer_sar(ep, peer_smr, resp, sar_entry); - } else { - smr_try_progress_from_sar(ep, peer_smr, smr_sar_pool(ep->region), - resp, &sar_entry->cmd, sar_entry->mr, - sar_entry->iov, - sar_entry->iov_count, - &sar_entry->bytes_done, - sar_entry); - } - } - - if (sar_entry->bytes_done == sar_entry->cmd.msg.hdr.size) { - if (sar_entry->cmd_ctx) { - sar_entry->cmd_ctx->sar_entry = NULL; - dlist_remove(&sar_entry->entry); - ofi_buf_free(sar_entry); - continue; - } - - if (sar_entry->rx_entry) { - comp_ctx = sar_entry->rx_entry->context; - comp_flags = smr_rx_cq_flags( - sar_entry->rx_entry->flags, - sar_entry->cmd.msg.hdr.op_flags); - } else { - comp_ctx = NULL; - comp_flags = smr_rx_cq_flags(0, - sar_entry->cmd.msg.hdr.op_flags); - } - ret = smr_complete_rx(ep, comp_ctx, - sar_entry->cmd.msg.hdr.op, comp_flags, - sar_entry->bytes_done, - sar_entry->iov[0].iov_base, - sar_entry->cmd.msg.hdr.id, - 
sar_entry->cmd.msg.hdr.tag, - sar_entry->cmd.msg.hdr.data); - if (ret) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "unable to process rx completion\n"); - } - if (sar_entry->rx_entry) - ep->srx->owner_ops->free_entry(sar_entry->rx_entry); - - dlist_remove(&sar_entry->entry); - ofi_buf_free(sar_entry); - } - } - ofi_genlock_unlock(&ep->util_ep.lock); } void smr_ep_progress(struct util_ep *util_ep) @@ -1445,8 +1332,12 @@ void smr_ep_progress(struct util_ep *util_ep) if (smr_env.use_dsa_sar) smr_dsa_progress(ep); - smr_progress_resp(ep); - smr_progress_sar_list(ep); + + smr_progress_return(ep); + + if (!slist_empty(&ep->overflow_list)) + smr_progress_overflow(ep); + smr_progress_cmd(ep); /* always drive forward the ipc list since the completion is diff --git a/prov/shm/src/smr_rma.c b/prov/shm/src/smr_rma.c index c12f6cdd89d..426b997528c 100644 --- a/prov/shm/src/smr_rma.c +++ b/prov/shm/src/smr_rma.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2021 Intel Corporation. All rights reserved + * Copyright (c) Intel Corporation. All rights reserved * (C) Copyright 2021 Amazon.com, Inc. or its affiliates. * * This software is available to you under a choice of one of two @@ -34,26 +34,27 @@ #include "smr.h" static void smr_add_rma_cmd(struct smr_region *peer_smr, - const struct fi_rma_iov *rma_iov, size_t iov_count, - struct smr_cmd_entry *ce) + const struct fi_rma_iov *rma_iov, size_t iov_count, + struct smr_cmd *cmd) { - ce->rma_cmd.rma.rma_count = iov_count; - memcpy(ce->rma_cmd.rma.rma_iov, rma_iov, sizeof(*rma_iov) * iov_count); + cmd->rma.rma_count = iov_count; + memcpy(cmd->rma.rma_iov, rma_iov, sizeof(*rma_iov) * iov_count); } static void smr_format_rma_resp(struct smr_cmd *cmd, fi_addr_t peer_id, const struct fi_rma_iov *rma_iov, size_t count, - size_t total_len, uint32_t op, uint64_t op_flags) + size_t total_len, uint32_t op, + uint64_t op_flags) { smr_generic_format(cmd, peer_id, op, 0, 0, op_flags); - cmd->msg.hdr.size = total_len; + cmd->hdr.size = total_len; } static ssize_t smr_rma_fast(struct smr_ep *ep, struct smr_region *peer_smr, - const struct iovec *iov, size_t iov_count, - const struct fi_rma_iov *rma_iov, size_t rma_count, - void **desc, int peer_id, int id, void *context, - uint32_t op, uint64_t op_flags) + const struct iovec *iov, size_t iov_count, + const struct fi_rma_iov *rma_iov, size_t rma_count, + void **desc, int peer_id, int id, void *context, + uint32_t op, uint64_t op_flags) { struct iovec vma_iovec[SMR_IOV_LIMIT], rma_iovec[SMR_IOV_LIMIT]; struct ofi_xpmem_client *xpmem; @@ -88,22 +89,26 @@ static ssize_t smr_rma_fast(struct smr_ep *ep, struct smr_region *peer_smr, smr_format_rma_resp(&ce->cmd, peer_id, rma_iov, rma_count, total_len, (op == ofi_op_write) ? 
ofi_op_write_async : ofi_op_read_async, op_flags); + + ce->ptr = smr_peer_to_peer(ep, id, (uintptr_t) &ce->cmd); smr_cmd_queue_commit(ce, pos); return FI_SUCCESS; } -static ssize_t smr_generic_rma(struct smr_ep *ep, const struct iovec *iov, - size_t iov_count, const struct fi_rma_iov *rma_iov, size_t rma_count, - void **desc, fi_addr_t addr, void *context, uint32_t op, uint64_t data, - uint64_t op_flags) +static ssize_t smr_generic_rma( + struct smr_ep *ep, const struct iovec *iov, + size_t iov_count, const struct fi_rma_iov *rma_iov, + size_t rma_count, void **desc, fi_addr_t addr, void *context, + uint32_t op, uint64_t data, uint64_t op_flags) { struct smr_domain *domain; struct smr_region *peer_smr; int64_t id, peer_id; - int cmds, err = 0, proto = smr_src_inline; + int cmds, err = 0, proto = smr_proto_inline; ssize_t ret = 0; size_t total_len; struct smr_cmd_entry *ce; + struct smr_cmd *cmd; int64_t pos; assert(iov_count <= SMR_IOV_LIMIT); @@ -111,14 +116,15 @@ static ssize_t smr_generic_rma(struct smr_ep *ep, const struct iovec *iov, assert(ofi_total_iov_len(iov, iov_count) == ofi_total_rma_iov_len(rma_iov, rma_count)); - domain = container_of(ep->util_ep.domain, struct smr_domain, util_domain); + domain = container_of(ep->util_ep.domain, struct smr_domain, + util_domain); id = smr_verify_peer(ep, addr); if (id < 0) return -FI_EAGAIN; - peer_id = smr_peer_data(ep->region)[id].addr.id; - peer_smr = smr_peer_region(ep->region, id); + peer_id = smr_peer_data(ep->region)[id].id; + peer_smr = smr_peer_region(ep, id); cmds = 1 + !(domain->fast_rma && !(op_flags & (FI_REMOTE_CQ_DATA | FI_DELIVERY_COMPLETE)) && @@ -156,7 +162,6 @@ static ssize_t smr_generic_rma(struct smr_ep *ep, const struct iovec *iov, ret = smr_cmd_queue_next(smr_cmd_queue(peer_smr), &ce, &pos); if (ret == -FI_ENOENT) { - /* kick the peer to process any outstanding commands */ ret = -FI_EAGAIN; goto unlock; } @@ -167,19 +172,34 @@ static ssize_t smr_generic_rma(struct smr_ep *ep, const struct iovec *iov, proto = smr_select_proto(desc, iov_count, smr_vma_enabled(ep, peer_smr), smr_ipc_valid(ep, peer_smr, id, peer_id), op, total_len, op_flags); - + if (proto != smr_proto_inline) { + if (smr_freestack_isempty(smr_cmd_stack(ep->region))) { + smr_cmd_queue_discard(ce, pos); + ret = -FI_EAGAIN; + goto unlock; + } + cmd = smr_freestack_pop(smr_cmd_stack(ep->region)); + assert(cmd); + ce->ptr = smr_local_to_peer(ep, id, peer_id, (uintptr_t) cmd); + } else { + cmd = &ce->cmd; + ce->ptr = smr_peer_to_peer(ep, id, (uintptr_t) &ce->cmd); + } ret = smr_proto_ops[proto](ep, peer_smr, id, peer_id, op, 0, data, op_flags, (struct ofi_mr **)desc, iov, - iov_count, total_len, context, &ce->cmd); + iov_count, total_len, context, cmd); if (ret) { + if (proto != smr_proto_inline) + smr_freestack_push(smr_cmd_stack(ep->region), cmd); smr_cmd_queue_discard(ce, pos); goto unlock; } - smr_add_rma_cmd(peer_smr, rma_iov, rma_count, ce); + smr_add_rma_cmd(peer_smr, rma_iov, rma_count, cmd); smr_cmd_queue_commit(ce, pos); - if (proto != smr_src_inline && proto != smr_src_inject) + if ((proto != smr_proto_inline && proto != smr_proto_inject) || + (op == ofi_op_read_req)) goto unlock; ret = smr_complete_tx(ep, context, op, op_flags); @@ -310,21 +330,23 @@ static ssize_t smr_generic_rma_inject(struct fid_ep *ep_fid, const void *buf, struct iovec iov; struct fi_rma_iov rma_iov; int64_t id, peer_id; - int cmds, proto = smr_src_inline; + int cmds, proto = smr_proto_inline; ssize_t ret = 0; + struct smr_cmd *cmd; struct smr_cmd_entry *ce; int64_t pos; 
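	/* Editor's note, an assumption from the assert that follows rather
	 * than a claim from the patch: the inject RMA path requires the
	 * whole payload to fit in one bounce buffer, so lengths beyond
	 * SMR_INJECT_SIZE must be rejected before any command is queued. */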
assert(len <= SMR_INJECT_SIZE); ep = container_of(ep_fid, struct smr_ep, util_ep.ep_fid.fid); - domain = container_of(ep->util_ep.domain, struct smr_domain, util_domain); + domain = container_of(ep->util_ep.domain, struct smr_domain, + util_domain); id = smr_verify_peer(ep, dest_addr); if (id < 0) return -FI_EAGAIN; - peer_id = smr_peer_data(ep->region)[id].addr.id; - peer_smr = smr_peer_region(ep->region, id); + peer_id = smr_peer_data(ep->region)[id].id; + peer_smr = smr_peer_region(ep, id); cmds = 1 + !(domain->fast_rma && !(flags & FI_REMOTE_CQ_DATA) && smr_vma_enabled(ep, peer_smr)); @@ -338,6 +360,8 @@ static ssize_t smr_generic_rma_inject(struct fid_ep *ep_fid, const void *buf, rma_iov.len = len; rma_iov.key = key; + ofi_genlock_lock(&ep->util_ep.lock); + if (cmds == 1) { ret = smr_rma_fast(ep, peer_smr, &iov, 1, &rma_iov, 1, NULL, peer_id, id, NULL, ofi_op_write, flags); @@ -348,19 +372,37 @@ static ssize_t smr_generic_rma_inject(struct fid_ep *ep_fid, const void *buf, if (ret == -FI_ENOENT) return -FI_EAGAIN; - proto = len <= SMR_MSG_DATA_LEN ? smr_src_inline : smr_src_inject; + if (len <= SMR_MSG_DATA_LEN) { + proto = smr_proto_inline; + cmd = &ce->cmd; + ce->ptr = smr_peer_to_peer(ep, id, (uintptr_t) &ce->cmd); + } else { + proto = smr_proto_inject; + if (smr_freestack_isempty(smr_cmd_stack(ep->region))) { + smr_cmd_queue_discard(ce, pos); + ret = -FI_EAGAIN; + goto unlock; + } + + cmd = smr_freestack_pop(smr_cmd_stack(ep->region)); + assert(cmd); + ce->ptr = smr_local_to_peer(ep, id, peer_id, (uintptr_t) cmd); + } + ret = smr_proto_ops[proto](ep, peer_smr, id, peer_id, ofi_op_write, 0, - data, flags, NULL, &iov, 1, len, NULL, &ce->cmd); + data, flags, NULL, &iov, 1, len, NULL, cmd); if (ret) { smr_cmd_queue_discard(ce, pos); - return -FI_EAGAIN; + goto unlock; } - smr_add_rma_cmd(peer_smr, &rma_iov, 1, ce); + smr_add_rma_cmd(peer_smr, &rma_iov, 1, cmd); smr_cmd_queue_commit(ce, pos); out: if (!ret) ofi_ep_peer_tx_cntr_inc(&ep->util_ep, ofi_op_write); +unlock: + ofi_genlock_unlock(&ep->util_ep.lock); return ret; } @@ -380,8 +422,8 @@ static ssize_t smr_writedata(struct fid_ep *ep_fid, const void *buf, size_t len, rma_iov.len = len; rma_iov.key = key; - return smr_generic_rma(ep, &iov, 1, &rma_iov, 1, &desc, dest_addr, context, - ofi_op_write, data, + return smr_generic_rma(ep, &iov, 1, &rma_iov, 1, &desc, dest_addr, + context, ofi_op_write, data, FI_REMOTE_CQ_DATA | smr_ep_tx_flags(ep)); } @@ -394,8 +436,9 @@ static ssize_t smr_rma_inject(struct fid_ep *ep_fid, const void *buf, } static ssize_t smr_inject_writedata(struct fid_ep *ep_fid, const void *buf, - size_t len, uint64_t data, fi_addr_t dest_addr, - uint64_t addr, uint64_t key) + size_t len, uint64_t data, + fi_addr_t dest_addr, uint64_t addr, + uint64_t key) { return smr_generic_rma_inject(ep_fid, buf, len, dest_addr, addr, key, data, FI_REMOTE_CQ_DATA); diff --git a/prov/shm/src/smr_signal.h b/prov/shm/src/smr_signal.h index 563a64bfaf1..1c549655fb4 100644 --- a/prov/shm/src/smr_signal.h +++ b/prov/shm/src/smr_signal.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2019 Amazon.com, Inc. or its affiliates. - * Copyright (c) 2020-2021 Intel Corporation. + * Copyright (c) Intel Corporation. * All rights reserved. 
* * This software is available to you under a choice of one of two @@ -42,7 +42,6 @@ extern struct sigaction *old_action; static void smr_handle_signal(int signum, siginfo_t *info, void *ucontext) { struct smr_ep_name *ep_name; - struct smr_sock_name *sock_name; int ret; pthread_mutex_lock(&ep_list_lock); @@ -52,13 +51,6 @@ static void smr_handle_signal(int signum, siginfo_t *info, void *ucontext) } pthread_mutex_unlock(&ep_list_lock); - pthread_mutex_lock(&sock_list_lock); - dlist_foreach_container(&sock_name_list, struct smr_sock_name, - sock_name, entry) { - unlink(sock_name->name); - } - pthread_mutex_unlock(&sock_list_lock); - /* Register the original signum handler, SIG_DFL or otherwise */ ret = sigaction(signum, &old_action[signum], NULL); if (ret) diff --git a/prov/shm/src/smr_util.c b/prov/shm/src/smr_util.c index fd67ecb6244..7e2750ee98b 100644 --- a/prov/shm/src/smr_util.c +++ b/prov/shm/src/smr_util.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021 Intel Corporation. All rights reserved. + * Copyright (c) Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -77,49 +77,45 @@ void smr_cma_check(struct smr_region *smr, struct smr_region *peer_smr) } size_t smr_calculate_size_offsets(size_t tx_count, size_t rx_count, - size_t *cmd_offset, size_t *resp_offset, - size_t *inject_offset, size_t *sar_offset, - size_t *peer_offset, size_t *name_offset, - size_t *sock_offset) + size_t *cmd_offset, size_t *cs_offset, + size_t *inject_offset, size_t *rq_offset, + size_t *sar_offset, size_t *peer_offset) { - size_t cmd_queue_offset, resp_queue_offset, inject_pool_offset; - size_t sar_pool_offset, peer_data_offset, ep_name_offset; - size_t tx_size, rx_size, total_size, sock_name_offset; + size_t cmd_queue_offset, cmd_stack_offset, inject_pool_offset; + size_t ret_queue_offset, sar_pool_offset, peer_data_offset; + size_t tx_size, rx_size, total_size; tx_size = roundup_power_of_two(tx_count); rx_size = roundup_power_of_two(rx_count); /* Align cmd_queue offset to cache line */ cmd_queue_offset = ofi_get_aligned_size(sizeof(struct smr_region), 64); - resp_queue_offset = cmd_queue_offset + sizeof(struct smr_cmd_queue) + + cmd_stack_offset = cmd_queue_offset + sizeof(struct smr_cmd_queue) + sizeof(struct smr_cmd_queue_entry) * rx_size; - inject_pool_offset = resp_queue_offset + sizeof(struct smr_resp_queue) + - sizeof(struct smr_resp) * tx_size; - sar_pool_offset = inject_pool_offset + - freestack_size(sizeof(struct smr_inject_buf), rx_size); + inject_pool_offset = cmd_stack_offset + + freestack_size(sizeof(struct smr_cmd), tx_size); + ret_queue_offset = inject_pool_offset + + freestack_size(sizeof(struct smr_inject_buf), tx_size); + ret_queue_offset = ofi_get_aligned_size(ret_queue_offset, 64); + sar_pool_offset = ret_queue_offset + sizeof(struct smr_return_queue) + + sizeof(struct smr_return_queue_entry) * tx_size; peer_data_offset = sar_pool_offset + freestack_size(sizeof(struct smr_sar_buf), SMR_MAX_PEERS); - ep_name_offset = peer_data_offset + sizeof(struct smr_peer_data) * + total_size = peer_data_offset + sizeof(struct smr_peer_data) * SMR_MAX_PEERS; - sock_name_offset = ep_name_offset + SMR_NAME_MAX; - if (cmd_offset) *cmd_offset = cmd_queue_offset; - if (resp_offset) - *resp_offset = resp_queue_offset; + if (cs_offset) + *cs_offset = cmd_stack_offset; if (inject_offset) *inject_offset = inject_pool_offset; + if (rq_offset) + *rq_offset = ret_queue_offset; if 
(sar_offset) *sar_offset = sar_pool_offset; if (peer_offset) *peer_offset = peer_data_offset; - if (name_offset) - *name_offset = ep_name_offset; - if (sock_offset) - *sock_offset = sock_name_offset; - - total_size = sock_name_offset + SMR_SOCK_NAME_MAX; /* * Revisit later to see if we really need the size adjustment, or @@ -169,29 +165,24 @@ static int smr_retry_map(const char *name, int *fd) return -FI_EBUSY; } -static void smr_lock_init(pthread_spinlock_t *lock) -{ - pthread_spin_init(lock, PTHREAD_PROCESS_SHARED); -} - /* TODO: Determine if aligning SMR data helps performance */ int smr_create(const struct fi_provider *prov, struct smr_map *map, const struct smr_attr *attr, struct smr_region *volatile *smr) { struct smr_ep_name *ep_name; - size_t total_size, cmd_queue_offset, peer_data_offset; - size_t resp_queue_offset, inject_pool_offset, name_offset; - size_t sar_pool_offset, sock_name_offset; + size_t total_size, cmd_queue_offset, ret_queue_offset, peer_data_offset; + size_t cmd_stack_offset, inject_pool_offset, sar_pool_offset; int fd, ret, i; void *mapped_addr; size_t tx_size, rx_size; tx_size = roundup_power_of_two(attr->tx_count); rx_size = roundup_power_of_two(attr->rx_count); - total_size = smr_calculate_size_offsets(tx_size, rx_size, &cmd_queue_offset, - &resp_queue_offset, &inject_pool_offset, - &sar_pool_offset, &peer_data_offset, - &name_offset, &sock_name_offset); + total_size = smr_calculate_size_offsets( + tx_size, rx_size, &cmd_queue_offset, + &cmd_stack_offset, &inject_pool_offset, + &ret_queue_offset, &sar_pool_offset, + &peer_data_offset); fd = shm_open(attr->name, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR); if (fd < 0) { @@ -252,9 +243,7 @@ int smr_create(const struct fi_provider *prov, struct smr_map *map, pthread_mutex_unlock(&ep_list_lock); *smr = mapped_addr; - smr_lock_init(&(*smr)->lock); - (*smr)->map = map; (*smr)->version = SMR_VERSION; (*smr)->flags = attr->flags; @@ -278,28 +267,30 @@ int smr_create(const struct fi_provider *prov, struct smr_map *map, (*smr)->total_size = total_size; (*smr)->cmd_queue_offset = cmd_queue_offset; - (*smr)->resp_queue_offset = resp_queue_offset; + (*smr)->cmd_stack_offset = cmd_stack_offset; (*smr)->inject_pool_offset = inject_pool_offset; + (*smr)->ret_queue_offset = ret_queue_offset; (*smr)->sar_pool_offset = sar_pool_offset; (*smr)->peer_data_offset = peer_data_offset; - (*smr)->name_offset = name_offset; - (*smr)->sock_name_offset = sock_name_offset; (*smr)->max_sar_buf_per_peer = SMR_BUF_BATCH_MAX; smr_cmd_queue_init(smr_cmd_queue(*smr), rx_size); - smr_resp_queue_init(smr_resp_queue(*smr), tx_size); + smr_return_queue_init(smr_return_queue(*smr), tx_size); + + smr_freestack_init(smr_cmd_stack(*smr), tx_size, + sizeof(struct smr_cmd_entry)); smr_freestack_init(smr_inject_pool(*smr), rx_size, sizeof(struct smr_inject_buf)); smr_freestack_init(smr_sar_pool(*smr), SMR_MAX_PEERS, sizeof(struct smr_sar_buf)); for (i = 0; i < SMR_MAX_PEERS; i++) { - smr_peer_data(*smr)[i].addr.id = -1; + smr_peer_data(*smr)[i].id = -1; smr_peer_data(*smr)[i].sar_status = 0; smr_peer_data(*smr)[i].name_sent = 0; smr_peer_data(*smr)[i].xpmem.cap = SMR_VMA_CAP_OFF; } - strncpy((char *) smr_name(*smr), attr->name, total_size - name_offset); + strcpy((*smr)->name, attr->name); /* Must be set last to signal full initialization to peers */ (*smr)->pid = getpid(); @@ -319,7 +310,7 @@ void smr_free(struct smr_region *smr) { if (smr->flags & SMR_FLAG_HMEM_ENABLED) (void) ofi_hmem_host_unregister(smr); - shm_unlink(smr_name(smr)); + 
shm_unlink(smr->name); munmap(smr, smr->total_size); } @@ -341,7 +332,7 @@ int smr_map_to_region(const struct fi_provider *prov, struct smr_map *map, int fd, ret = 0; struct stat sts; struct dlist_entry *entry; - const char *name = smr_no_prefix(peer_buf->peer.name); + const char *name = smr_no_prefix(peer_buf->name); char tmp[SMR_PATH_MAX]; pthread_mutex_lock(&ep_list_lock); @@ -418,7 +409,7 @@ int smr_map_to_region(const struct fi_provider *prov, struct smr_map *map, dlist_foreach_container(&av->util_av.ep_list, struct util_ep, util_ep, av_entry) { smr_ep = container_of(util_ep, struct smr_ep, util_ep); - smr_map_to_endpoint(smr_ep->region, id); + smr_map_to_endpoint(smr_ep, id); } out: @@ -426,31 +417,37 @@ int smr_map_to_region(const struct fi_provider *prov, struct smr_map *map, return ret; } -void smr_map_to_endpoint(struct smr_region *region, int64_t id) +void smr_map_to_endpoint(struct smr_ep *ep, int64_t id) { int ret; struct smr_region *peer_smr; + struct smr_av *av; struct smr_peer_data *local_peers; - assert(ofi_spin_held(®ion->map->lock)); - peer_smr = smr_peer_region(region, id); - if (region->map->peers[id].peer.id < 0 || !peer_smr) + av = container_of(ep->util_ep.av, struct smr_av, util_av); + + assert(ofi_spin_held(&av->smr_map.lock)); + peer_smr = smr_peer_region(ep, id); + if (!av->smr_map.peers[id].id_assigned || !peer_smr) return; - local_peers = smr_peer_data(region); + local_peers = smr_peer_data(ep->region); + local_peers[id].local_region = (uintptr_t) peer_smr; - if ((region != peer_smr && region->cma_cap_peer == SMR_VMA_CAP_NA) || - (region == peer_smr && region->cma_cap_self == SMR_VMA_CAP_NA)) - smr_cma_check(region, peer_smr); + if ((ep->region != peer_smr && + ep->region->cma_cap_peer == SMR_VMA_CAP_NA) || + (ep->region == peer_smr && + ep->region->cma_cap_self == SMR_VMA_CAP_NA)) + smr_cma_check(ep->region, peer_smr); /* enable xpmem locally if the peer also has it enabled */ if (peer_smr->xpmem_cap_self == SMR_VMA_CAP_ON && - region->xpmem_cap_self == SMR_VMA_CAP_ON) { + ep->region->xpmem_cap_self == SMR_VMA_CAP_ON) { ret = ofi_xpmem_enable(&peer_smr->xpmem_self, &local_peers[id].xpmem); if (ret) { local_peers[id].xpmem.cap = SMR_VMA_CAP_OFF; - region->xpmem_cap_self = SMR_VMA_CAP_OFF; + ep->region->xpmem_cap_self = SMR_VMA_CAP_OFF; return; } local_peers[id].xpmem.cap = SMR_VMA_CAP_ON; @@ -459,7 +456,7 @@ void smr_map_to_endpoint(struct smr_region *region, int64_t id) local_peers[id].xpmem.cap = SMR_VMA_CAP_OFF; } - smr_set_ipc_valid(region, id); + smr_set_ipc_valid(ep, id); return; } @@ -484,7 +481,7 @@ void smr_unmap_region(const struct fi_provider *prov, struct smr_map *map, dlist_foreach_container(&av->util_av.ep_list, struct util_ep, util_ep, av_entry) { smr_ep = container_of(util_ep, struct smr_ep, util_ep); - smr_unmap_from_endpoint(smr_ep->region, peer_id); + smr_unmap_from_endpoint(smr_ep, peer_id); } /* Don't unmap memory owned by this pid because the endpoint it belongs @@ -509,36 +506,40 @@ void smr_unmap_region(const struct fi_provider *prov, struct smr_map *map, peer->region = NULL; } -void smr_unmap_from_endpoint(struct smr_region *region, int64_t id) +void smr_unmap_from_endpoint(struct smr_ep *ep, int64_t id) { struct smr_region *peer_smr; + struct smr_av *av; struct smr_peer_data *local_peers, *peer_peers; int64_t peer_id; - if (region->map->peers[id].peer.id < 0) + av = container_of(ep->util_ep.av, struct smr_av, util_av); + if (!av->smr_map.peers[id].id_assigned) return; - peer_smr = smr_peer_region(region, id); + peer_smr = 
smr_peer_region(ep, id); assert(peer_smr); peer_peers = smr_peer_data(peer_smr); - peer_id = smr_peer_data(region)[id].addr.id; + peer_id = smr_peer_data(ep->region)[id].id; - peer_peers[peer_id].addr.id = -1; + peer_peers[peer_id].id = -1; peer_peers[peer_id].name_sent = 0; - local_peers = smr_peer_data(region); + local_peers = smr_peer_data(ep->region); ofi_xpmem_release(&local_peers[peer_id].xpmem); } -void smr_exchange_all_peers(struct smr_region *region) +void smr_exchange_all_peers(struct smr_ep *ep) { + struct smr_av *av; int64_t i; - ofi_spin_lock(®ion->map->lock); + av = container_of(ep->util_ep.av, struct smr_av, util_av); + ofi_spin_lock(&av->smr_map.lock); for (i = 0; i < SMR_MAX_PEERS; i++) - smr_map_to_endpoint(region, i); + smr_map_to_endpoint(ep, i); - ofi_spin_unlock(®ion->map->lock); + ofi_spin_unlock(&av->smr_map.lock); } int smr_map_add(const struct fi_provider *prov, struct smr_map *map, @@ -556,7 +557,7 @@ int smr_map_add(const struct fi_provider *prov, struct smr_map *map, goto out; } - while (map->peers[map->cur_id].peer.id != -1 && tries < SMR_MAX_PEERS) { + while (map->peers[map->cur_id].id_assigned && tries < SMR_MAX_PEERS) { if (++map->cur_id == SMR_MAX_PEERS) map->cur_id = 0; tries++; @@ -567,11 +568,11 @@ int smr_map_add(const struct fi_provider *prov, struct smr_map *map, if (++map->cur_id == SMR_MAX_PEERS) map->cur_id = 0; node->data = (void *) (intptr_t) *id; - strncpy(map->peers[*id].peer.name, name, SMR_NAME_MAX); - map->peers[*id].peer.name[SMR_NAME_MAX - 1] = '\0'; + strncpy(map->peers[*id].name, name, SMR_NAME_MAX); + map->peers[*id].name[SMR_NAME_MAX - 1] = '\0'; map->peers[*id].region = NULL; map->num_peers++; - map->peers[*id].peer.id = *id; + map->peers[*id].id_assigned = true; out: ofi_spin_unlock(&map->lock); @@ -585,8 +586,9 @@ void smr_map_del(struct smr_map *map, int64_t id) assert(id >= 0 && id < SMR_MAX_PEERS); pthread_mutex_lock(&ep_list_lock); - dlist_foreach_container(&ep_name_list, struct smr_ep_name, name, entry) { - if (!strcmp(name->name, map->peers[id].peer.name)) { + dlist_foreach_container(&ep_name_list, struct smr_ep_name, name, + entry) { + if (!strcmp(name->name, map->peers[id].name)) { local = true; break; } @@ -595,9 +597,9 @@ void smr_map_del(struct smr_map *map, int64_t id) ofi_spin_lock(&map->lock); smr_unmap_region(&smr_prov, map, id, local); map->peers[id].fiaddr = FI_ADDR_NOTAVAIL; - map->peers[id].peer.id = -1; + map->peers[id].id_assigned = false; map->num_peers--; - ofi_rbmap_find_delete(&map->rbmap, map->peers[id].peer.name); + ofi_rbmap_find_delete(&map->rbmap, map->peers[id].name); ofi_spin_unlock(&map->lock); } diff --git a/prov/shm/src/smr_util.h b/prov/shm/src/smr_util.h deleted file mode 100644 index 407736f06eb..00000000000 --- a/prov/shm/src/smr_util.h +++ /dev/null @@ -1,360 +0,0 @@ -/* - * Copyright (c) 2016-2021 Intel Corporation. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. 
- * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef _OFI_SHM_H_ -#define _OFI_SHM_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#define SMR_VERSION 8 - -#define SMR_FLAG_ATOMIC (1 << 0) -#define SMR_FLAG_DEBUG (1 << 1) -#define SMR_FLAG_IPC_SOCK (1 << 2) -#define SMR_FLAG_HMEM_ENABLED (1 << 3) - -#define SMR_CMD_SIZE 256 /* align with 64-byte cache line */ - -/* SMR op_src: Specifies data source location */ -enum { - smr_src_inline, /* command data */ - smr_src_inject, /* inject buffers */ - smr_src_iov, /* reference iovec via CMA */ - smr_src_mmap, /* mmap-based fallback protocol */ - smr_src_sar, /* segmentation fallback protocol */ - smr_src_ipc, /* device IPC handle protocol */ - smr_src_max, -}; - -//reserves 0-255 for defined ops and room for new ops -//256 and beyond reserved for ctrl ops -#define SMR_OP_MAX (1 << 8) - -#define SMR_REMOTE_CQ_DATA (1 << 0) -#define SMR_RMA_REQ (1 << 1) -#define SMR_TX_COMPLETION (1 << 2) -#define SMR_RX_COMPLETION (1 << 3) -#define SMR_MULTI_RECV (1 << 4) - -/* CMA/XPMEM capability. Generic acronym used: - * VMA: Virtual Memory Address */ -enum { - SMR_VMA_CAP_NA, - SMR_VMA_CAP_ON, - SMR_VMA_CAP_OFF, -}; - -/* - * Unique smr_op_hdr for smr message protocol: - * addr - local shm_id of peer sending msg (for shm lookup) - * op - type of op (ex. ofi_op_msg, defined in ofi_proto.h) - * op_src - msg src (ex. smr_src_inline, defined above) - * op_flags - operation flags (ex. 
SMR_REMOTE_CQ_DATA, defined above) - * src_data - src of additional op data (inject offset / resp offset) - * data - remote CQ data - */ -struct smr_msg_hdr { - uint64_t msg_id; - int64_t id; - uint32_t op; - uint16_t op_src; - uint16_t op_flags; - - uint64_t size; - uint64_t src_data; - uint64_t data; - union { - uint64_t tag; - struct { - uint8_t datatype; - uint8_t atomic_op; - }; - }; -} __attribute__ ((aligned(16))); - -#define SMR_BUF_BATCH_MAX 64 -#define SMR_MSG_DATA_LEN (SMR_CMD_SIZE - sizeof(struct smr_msg_hdr)) - -union smr_cmd_data { - uint8_t msg[SMR_MSG_DATA_LEN]; - struct { - size_t iov_count; - struct iovec iov[(SMR_MSG_DATA_LEN - sizeof(size_t)) / - sizeof(struct iovec)]; - }; - struct { - uint32_t buf_batch_size; - int16_t sar[SMR_BUF_BATCH_MAX]; - }; - struct ipc_info ipc_info; -}; - -struct smr_cmd_msg { - struct smr_msg_hdr hdr; - union smr_cmd_data data; -}; - -#define SMR_RMA_DATA_LEN (128 - sizeof(uint64_t)) -struct smr_cmd_rma { - uint64_t rma_count; - union { - struct fi_rma_iov rma_iov[SMR_RMA_DATA_LEN / - sizeof(struct fi_rma_iov)]; - struct fi_rma_ioc rma_ioc[SMR_RMA_DATA_LEN / - sizeof(struct fi_rma_ioc)]; - }; -}; - -struct smr_cmd { - union { - struct smr_cmd_msg msg; - struct smr_cmd_rma rma; - }; -}; - -#define SMR_INJECT_SIZE 4096 -#define SMR_COMP_INJECT_SIZE (SMR_INJECT_SIZE / 2) -#define SMR_SAR_SIZE 32768 - -#define SMR_DIR "/dev/shm/" -#define SMR_NAME_MAX 256 -#define SMR_PATH_MAX (SMR_NAME_MAX + sizeof(SMR_DIR)) -#define SMR_SOCK_NAME_MAX sizeof(((struct sockaddr_un *)0)->sun_path) - -/* On next version update remove this struct to make id a bool in the smr_peer - * remove name from smr_peer_data because it is unused. - */ -struct smr_addr { - char name[SMR_NAME_MAX]; - int64_t id; -}; - -struct smr_peer_data { - struct smr_addr addr; - uint32_t sar_status; - uint16_t name_sent; - uint16_t ipc_valid; - struct ofi_xpmem_client xpmem; -}; - -extern struct dlist_entry ep_name_list; -extern pthread_mutex_t ep_list_lock; -extern struct dlist_entry sock_name_list; -extern pthread_mutex_t sock_list_lock; - -struct smr_region; - -struct smr_ep_name { - char name[SMR_NAME_MAX]; - struct smr_region *region; - struct dlist_entry entry; -}; - -static inline const char *smr_no_prefix(const char *addr) -{ - char *start; - - return (start = strstr(addr, "://")) ? 
start + 3 : addr; -} - -struct smr_peer { - struct smr_addr peer; - fi_addr_t fiaddr; - struct smr_region *region; - int pid_fd; -}; - -#define SMR_MAX_PEERS 256 - -struct smr_map { - ofi_spin_t lock; - int64_t cur_id; - int num_peers; - uint16_t flags; - struct ofi_rbmap rbmap; - struct smr_peer peers[SMR_MAX_PEERS]; -}; - -struct smr_region { - uint8_t version; - uint8_t resv; - uint16_t flags; - int pid; - uint8_t cma_cap_peer; - uint8_t cma_cap_self; - uint8_t xpmem_cap_self; - uint8_t resv2; - - uint32_t max_sar_buf_per_peer; - struct ofi_xpmem_pinfo xpmem_self; - struct ofi_xpmem_pinfo xpmem_peer; - void *base_addr; - pthread_spinlock_t lock; /* lock for shm access - if both ep->tx_lock and this lock need to - held, then ep->tx_lock needs to be held - first */ - - struct smr_map *map; - - size_t total_size; - - /* offsets from start of smr_region */ - size_t cmd_queue_offset; - size_t resp_queue_offset; - size_t inject_pool_offset; - size_t sar_pool_offset; - size_t peer_data_offset; - size_t name_offset; - size_t sock_name_offset; -}; - -struct smr_resp { - uint64_t msg_id; - uint64_t status; -}; - -struct smr_inject_buf { - union { - uint8_t data[SMR_INJECT_SIZE]; - struct { - uint8_t buf[SMR_COMP_INJECT_SIZE]; - uint8_t comp[SMR_COMP_INJECT_SIZE]; - }; - }; -}; - -enum smr_status { - SMR_STATUS_SUCCESS = 0, /* success*/ - SMR_STATUS_BUSY = FI_EBUSY, /* busy */ - - SMR_STATUS_OFFSET = 1024, /* Beginning of shm-specific codes */ - SMR_STATUS_SAR_EMPTY, /* buffer can be written into */ - SMR_STATUS_SAR_FULL, /* buffer can be read from */ -}; - -struct smr_sar_buf { - uint8_t buf[SMR_SAR_SIZE]; -}; - -/* TODO it is expected that a future patch will expand the smr_cmd - * structure to also include the rma information, thereby removing the - * need to have two commands in the cmd_entry. 
We can also remove the - * command entry completely and just use the smr_cmd - */ -struct smr_cmd_entry { - struct smr_cmd cmd; - struct smr_cmd rma_cmd; -}; - -/* Queue of offsets of the command blocks obtained from the command pool - * freestack - */ -OFI_DECLARE_CIRQUE(struct smr_resp, smr_resp_queue); -OFI_DECLARE_ATOMIC_Q(struct smr_cmd_entry, smr_cmd_queue); - -static inline struct smr_region *smr_peer_region(struct smr_region *smr, int i) -{ - return smr->map->peers[i].region; -} -static inline struct smr_cmd_queue *smr_cmd_queue(struct smr_region *smr) -{ - return (struct smr_cmd_queue *) ((char *) smr + smr->cmd_queue_offset); -} -static inline struct smr_resp_queue *smr_resp_queue(struct smr_region *smr) -{ - return (struct smr_resp_queue *) ((char *) smr + smr->resp_queue_offset); -} -static inline struct smr_freestack *smr_inject_pool(struct smr_region *smr) -{ - return (struct smr_freestack *) ((char *) smr + smr->inject_pool_offset); -} -static inline struct smr_peer_data *smr_peer_data(struct smr_region *smr) -{ - return (struct smr_peer_data *) ((char *) smr + smr->peer_data_offset); -} -static inline struct smr_freestack *smr_sar_pool(struct smr_region *smr) -{ - return (struct smr_freestack *) ((char *) smr + smr->sar_pool_offset); -} -static inline const char *smr_name(struct smr_region *smr) -{ - return (const char *) smr + smr->name_offset; -} - -static inline char *smr_sock_name(struct smr_region *smr) -{ - return (char *) smr + smr->sock_name_offset; -} - -static inline void smr_set_map(struct smr_region *smr, struct smr_map *map) -{ - smr->map = map; -} - -struct smr_attr { - const char *name; - size_t rx_count; - size_t tx_count; - uint16_t flags; -}; - -size_t smr_calculate_size_offsets(size_t tx_count, size_t rx_count, - size_t *cmd_offset, size_t *resp_offset, - size_t *inject_offset, size_t *sar_offset, - size_t *peer_offset, size_t *name_offset, - size_t *sock_offset); -void smr_cma_check(struct smr_region *region, struct smr_region *peer_region); -void smr_cleanup(void); -int smr_map_to_region(const struct fi_provider *prov, struct smr_map *map, - int64_t id); -void smr_map_to_endpoint(struct smr_region *region, int64_t id); -void smr_unmap_region(const struct fi_provider *prov, struct smr_map *map, - int64_t id, bool found); -void smr_unmap_from_endpoint(struct smr_region *region, int64_t id); -void smr_exchange_all_peers(struct smr_region *region); -int smr_map_add(const struct fi_provider *prov, struct smr_map *map, - const char *name, int64_t *id); -void smr_map_del(struct smr_map *map, int64_t id); - -struct smr_region *smr_map_get(struct smr_map *map, int64_t id); - -int smr_create(const struct fi_provider *prov, struct smr_map *map, - const struct smr_attr *attr, struct smr_region *volatile *smr); -void smr_free(struct smr_region *smr); - -#ifdef __cplusplus -} -#endif - -#endif /* _OFI_SHM_H_ */ From aad2d874c05e0bf22a25dc73b965b7508baaf66c Mon Sep 17 00:00:00 2001 From: Alexia Ingerson Date: Thu, 19 Dec 2024 14:17:12 -0800 Subject: [PATCH 05/13] prov/shm: consolidate cma and xpmem caps shm has self and peer caps for each p2p interface (right now just CMA and xpmem). The support for each of these interfaces is saved in separate fields, which wastes memory and is confusing. Merge these into two fields (one for self and one for peer) which hold the information for all p2p interfaces and are accessed by the P2P type enums. CMA also needs a flag to know whether CMA support has been queried yet.
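As an illustration (this usage sketch is not part of the diff; the smr_set_vma_cap/smr_get_vma_cap helpers and the FI_SHM_P2P_* values are the ones added below, while the local variable and the use_single_copy() call are hypothetical), each consolidated field packs one availability bit per P2P interface:

	uint8_t self_vma_caps = 0;

	/* record that CMA was probed and works for this process pair */
	smr_set_vma_cap(&self_vma_caps, FI_SHM_P2P_CMA, true);

	/* data path: test one bit instead of a dedicated field */
	if (smr_get_vma_cap(self_vma_caps, FI_SHM_P2P_CMA))
		use_single_copy();	/* hypothetical fast-path call */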
This also moves some shm fields around for alignment Signed-off-by: Alexia Ingerson --- include/ofi_xpmem.h | 2 +- prov/shm/src/smr.h | 41 +++++++++++++++++++------------------ prov/shm/src/smr_ep.c | 10 ++++++--- prov/shm/src/smr_util.c | 45 ++++++++++++++++++++++------------------- 4 files changed, 53 insertions(+), 45 deletions(-) diff --git a/include/ofi_xpmem.h b/include/ofi_xpmem.h index c96164c9bb6..1a58af21e59 100644 --- a/include/ofi_xpmem.h +++ b/include/ofi_xpmem.h @@ -48,7 +48,7 @@ typedef int64_t xpmem_segid_t; #endif /* HAVE_XPMEM */ struct ofi_xpmem_client { - uint8_t cap; + bool avail; xpmem_apid_t apid; uintptr_t addr_max; }; diff --git a/prov/shm/src/smr.h b/prov/shm/src/smr.h index 41b619f6dda..21235384b5b 100644 --- a/prov/shm/src/smr.h +++ b/prov/shm/src/smr.h @@ -92,6 +92,7 @@ #define SMR_FLAG_ATOMIC (1 << 0) #define SMR_FLAG_DEBUG (1 << 1) #define SMR_FLAG_HMEM_ENABLED (1 << 3) +#define SMR_FLAG_CMA_INIT (1 << 4) //shm region defines #define SMR_CMD_SIZE 440 /* align with 64-byte cache line */ @@ -112,14 +113,6 @@ enum { smr_proto_max, }; -/* CMA/XPMEM capability. Generic acronym used: - * VMA: Virtual Memory Address */ -enum { - SMR_VMA_CAP_NA, - SMR_VMA_CAP_ON, - SMR_VMA_CAP_OFF, -}; - /* * Unique smr_op_hdr for smr message protocol: * entry - for internal use managing commands (must be kept first) @@ -250,15 +243,16 @@ struct smr_region { uint8_t version; uint8_t resv; uint16_t flags; - int pid; - uint8_t cma_cap_peer; - uint8_t cma_cap_self; - uint8_t xpmem_cap_self; - uint8_t resv2; + uint8_t self_vma_caps; + uint8_t peer_vma_caps; - uint32_t max_sar_buf_per_peer; + uint16_t max_sar_buf_per_peer; struct ofi_xpmem_pinfo xpmem_self; struct ofi_xpmem_pinfo xpmem_peer; + + int pid; + int resv2; + void *base_addr; char name[SMR_NAME_MAX]; @@ -274,6 +268,17 @@ struct smr_region { size_t peer_data_offset; }; +static inline void smr_set_vma_cap(uint8_t *vma_cap, uint8_t type, bool avail) +{ + (*vma_cap) &= ~(1 << type); + (*vma_cap) |= (uint8_t) avail << type; +} + +static inline uint8_t smr_get_vma_cap(uint8_t vma_cap, uint8_t type) +{ + return vma_cap & (1 << type); +} + struct smr_inject_buf { union { uint8_t data[SMR_INJECT_SIZE]; @@ -615,12 +620,8 @@ void smr_ep_progress(struct util_ep *util_ep); static inline bool smr_vma_enabled(struct smr_ep *ep, struct smr_region *peer_smr) { - if (ep->region == peer_smr) - return (ep->region->cma_cap_self == SMR_VMA_CAP_ON || - ep->region->xpmem_cap_self == SMR_VMA_CAP_ON); - else - return (ep->region->cma_cap_peer == SMR_VMA_CAP_ON || - peer_smr->xpmem_cap_self == SMR_VMA_CAP_ON); + return ep->region == peer_smr ? 
ep->region->self_vma_caps : + ep->region->peer_vma_caps; } static inline void smr_set_ipc_valid(struct smr_ep *ep, uint64_t id) diff --git a/prov/shm/src/smr_ep.c b/prov/shm/src/smr_ep.c index 500c53bd615..2c28d0b7ea1 100644 --- a/prov/shm/src/smr_ep.c +++ b/prov/shm/src/smr_ep.c @@ -808,8 +808,11 @@ static int smr_ep_ctrl(struct fid *fid, int command, void *arg) return ret; if (ep->util_ep.caps & FI_HMEM || smr_env.disable_cma) { - ep->region->cma_cap_peer = SMR_VMA_CAP_OFF; - ep->region->cma_cap_self = SMR_VMA_CAP_OFF; + smr_set_vma_cap(&ep->region->peer_vma_caps, + FI_SHM_P2P_CMA, false); + smr_set_vma_cap(&ep->region->self_vma_caps, + FI_SHM_P2P_CMA, false); + ep->region->flags |= SMR_FLAG_CMA_INIT; } if (ofi_hmem_any_ipc_enabled()) @@ -851,7 +854,8 @@ static int smr_ep_ctrl(struct fid *fid, int command, void *arg) * endpoint p2p to XPMEM so it can be used on the fast * path */ - if (ep->region->xpmem_cap_self == SMR_VMA_CAP_ON) + if (smr_get_vma_cap(ep->region->self_vma_caps, + FI_SHM_P2P_XPMEM)) ep->p2p_type = FI_SHM_P2P_XPMEM; break; diff --git a/prov/shm/src/smr_util.c b/prov/shm/src/smr_util.c index 7e2750ee98b..d26fe5ce347 100644 --- a/prov/shm/src/smr_util.c +++ b/prov/shm/src/smr_util.c @@ -54,8 +54,11 @@ void smr_cma_check(struct smr_region *smr, struct smr_region *peer_smr) int remote_pid; int ret; - if (smr != peer_smr && peer_smr->cma_cap_peer != SMR_VMA_CAP_NA) { - smr->cma_cap_peer = peer_smr->cma_cap_peer; + if (smr != peer_smr && peer_smr->flags & SMR_FLAG_CMA_INIT) { + smr_set_vma_cap(&smr->peer_vma_caps, FI_SHM_P2P_CMA, + smr_get_vma_cap(peer_smr->peer_vma_caps, + FI_SHM_P2P_CMA)); + smr->flags |= SMR_FLAG_CMA_INIT; return; } remote_pid = peer_smr->pid; @@ -69,10 +72,16 @@ void smr_cma_check(struct smr_region *smr, struct smr_region *peer_smr) assert(remote_pid == peer_smr->pid); if (smr == peer_smr) { - smr->cma_cap_self = (ret == -1) ? SMR_VMA_CAP_OFF : SMR_VMA_CAP_ON; + smr_set_vma_cap(&smr->self_vma_caps, FI_SHM_P2P_CMA, + (ret == -1) ? false : true); } else { - smr->cma_cap_peer = (ret == -1) ? SMR_VMA_CAP_OFF : SMR_VMA_CAP_ON; - peer_smr->cma_cap_peer = smr->cma_cap_peer; + smr_set_vma_cap(&smr->peer_vma_caps, FI_SHM_P2P_CMA, + (ret == -1) ? 
false : true); + smr_set_vma_cap(&peer_smr->peer_vma_caps, FI_SHM_P2P_CMA, + smr_get_vma_cap(smr->peer_vma_caps, + FI_SHM_P2P_CMA)); + smr->flags |= SMR_FLAG_CMA_INIT; + peer_smr->flags |= SMR_FLAG_CMA_INIT; } } @@ -254,12 +263,8 @@ int smr_create(const struct fi_provider *prov, struct smr_map *map, (*smr)->flags |= SMR_FLAG_DEBUG; #endif - (*smr)->cma_cap_peer = SMR_VMA_CAP_NA; - (*smr)->cma_cap_self = SMR_VMA_CAP_NA; - - (*smr)->xpmem_cap_self = SMR_VMA_CAP_OFF; if (xpmem && smr_env.use_xpmem) { - (*smr)->xpmem_cap_self = SMR_VMA_CAP_ON; + smr_set_vma_cap(&(*smr)->self_vma_caps, FI_SHM_P2P_XPMEM, true); (*smr)->xpmem_self = xpmem->pinfo; } @@ -287,7 +292,7 @@ int smr_create(const struct fi_provider *prov, struct smr_map *map, smr_peer_data(*smr)[i].id = -1; smr_peer_data(*smr)[i].sar_status = 0; smr_peer_data(*smr)[i].name_sent = 0; - smr_peer_data(*smr)[i].xpmem.cap = SMR_VMA_CAP_OFF; + smr_peer_data(*smr)[i].xpmem.avail = false; } strcpy((*smr)->name, attr->name); @@ -434,26 +439,24 @@ void smr_map_to_endpoint(struct smr_ep *ep, int64_t id) local_peers = smr_peer_data(ep->region); local_peers[id].local_region = (uintptr_t) peer_smr; - if ((ep->region != peer_smr && - ep->region->cma_cap_peer == SMR_VMA_CAP_NA) || - (ep->region == peer_smr && - ep->region->cma_cap_self == SMR_VMA_CAP_NA)) + if (ep->region == peer_smr || !(ep->region->flags & SMR_FLAG_CMA_INIT)) smr_cma_check(ep->region, peer_smr); /* enable xpmem locally if the peer also has it enabled */ - if (peer_smr->xpmem_cap_self == SMR_VMA_CAP_ON && - ep->region->xpmem_cap_self == SMR_VMA_CAP_ON) { + if (smr_get_vma_cap(peer_smr->self_vma_caps, FI_SHM_P2P_XPMEM) && + smr_get_vma_cap(ep->region->self_vma_caps, FI_SHM_P2P_XPMEM)) { ret = ofi_xpmem_enable(&peer_smr->xpmem_self, &local_peers[id].xpmem); if (ret) { - local_peers[id].xpmem.cap = SMR_VMA_CAP_OFF; - ep->region->xpmem_cap_self = SMR_VMA_CAP_OFF; + local_peers[id].xpmem.avail = false; + smr_set_vma_cap(&ep->region->self_vma_caps, + FI_SHM_P2P_XPMEM, false); return; } - local_peers[id].xpmem.cap = SMR_VMA_CAP_ON; + local_peers[id].xpmem.avail = true; local_peers[id].xpmem.addr_max = peer_smr->xpmem_self.address_max; } else { - local_peers[id].xpmem.cap = SMR_VMA_CAP_OFF; + local_peers[id].xpmem.avail = false; } smr_set_ipc_valid(ep, id); From 38c3f860dea40fc5fb7a7ccd18b268030edb1a3f Mon Sep 17 00:00:00 2001 From: Alexia Ingerson Date: Fri, 20 Dec 2024 12:31:40 -0800 Subject: [PATCH 06/13] prov/shm: add a pointer to the map to the EP Simplifies access to the map and removes the need for container_of lookups. Signed-off-by: Alexia Ingerson --- prov/shm/src/smr.h | 10 +++------- prov/shm/src/smr_ep.c | 16 ++++++---------- prov/shm/src/smr_progress.c | 35 ++++++++++++++--------------------- prov/shm/src/smr_util.c | 22 ++++++++-------------- 4 files changed, 31 insertions(+), 52 deletions(-) diff --git a/prov/shm/src/smr.h b/prov/shm/src/smr.h index 21235384b5b..0404b2f7db8 100644 --- a/prov/shm/src/smr.h +++ b/prov/shm/src/smr.h @@ -316,6 +316,7 @@ struct smr_ep { const char *name; uint64_t msg_id; struct smr_region *volatile region; + struct smr_map *map; struct fid_peer_srx *srx; struct ofi_bufpool *cmd_ctx_pool; struct ofi_bufpool *unexp_buf_pool; @@ -340,8 +341,7 @@ struct smr_av { static inline struct smr_region *smr_peer_region(struct smr_ep *ep, int i) { - return container_of(ep->util_ep.av, struct smr_av, util_av)-> - smr_map.peers[i].region; + return ep->map->peers[i].region; } static inline struct smr_cmd_queue *smr_cmd_queue(struct smr_region *smr) { @@ -626,12 +626,8 @@ static 
inline bool smr_vma_enabled(struct smr_ep *ep, static inline void smr_set_ipc_valid(struct smr_ep *ep, uint64_t id) { - struct smr_av *av; - - av = container_of(ep->util_ep.av, struct smr_av, util_av); - if (ofi_hmem_is_initialized(FI_HMEM_ZE) && - av->smr_map.peers[id].pid_fd == -1) + ep->map->peers[id].pid_fd == -1) smr_peer_data(ep->region)[id].ipc_valid = 0; else smr_peer_data(ep->region)[id].ipc_valid = 1; diff --git a/prov/shm/src/smr_ep.c b/prov/shm/src/smr_ep.c index 2c28d0b7ea1..d5d364a50f9 100644 --- a/prov/shm/src/smr_ep.c +++ b/prov/shm/src/smr_ep.c @@ -190,12 +190,9 @@ static void smr_send_name(struct smr_ep *ep, int64_t id) int64_t smr_verify_peer(struct smr_ep *ep, fi_addr_t fi_addr) { - struct smr_av *av; int64_t id; int ret; - av = container_of(ep->util_ep.av, struct smr_av, util_av); - id = smr_addr_lookup(ep->util_ep.av, fi_addr); assert(id < SMR_MAX_PEERS); if (id < 0) @@ -204,10 +201,10 @@ int64_t smr_verify_peer(struct smr_ep *ep, fi_addr_t fi_addr) if (smr_peer_data(ep->region)[id].id >= 0) return id; - if (!av->smr_map.peers[id].region) { - ofi_spin_lock(&av->smr_map.lock); - ret = smr_map_to_region(&smr_prov, &av->smr_map, id); - ofi_spin_unlock(&av->smr_map.lock); + if (!ep->map->peers[id].region) { + ofi_spin_lock(&ep->map->lock); + ret = smr_map_to_region(&smr_prov, ep->map, id); + ofi_spin_unlock(&ep->map->lock); if (ret) return -1; } @@ -755,6 +752,7 @@ static int smr_ep_bind(struct fid *ep_fid, struct fid *bfid, uint64_t flags) "duplicate AV binding\n"); return -FI_EINVAL; } + ep->map = &container_of(av, struct smr_av, util_av)->smr_map; break; case FI_CLASS_CQ: ret = smr_ep_bind_cq(ep, container_of(bfid, struct util_cq, @@ -782,12 +780,10 @@ static int smr_ep_ctrl(struct fid *fid, int command, void *arg) struct smr_attr attr; struct smr_domain *domain; struct smr_ep *ep; - struct smr_av *av; struct fid_ep *srx; int ret; ep = container_of(fid, struct smr_ep, util_ep.ep_fid.fid); - av = container_of(ep->util_ep.av, struct smr_av, util_av); switch (command) { case FI_ENABLE: @@ -803,7 +799,7 @@ static int smr_ep_ctrl(struct fid *fid, int command, void *arg) attr.flags = ep->util_ep.caps & FI_HMEM ? 
SMR_FLAG_HMEM_ENABLED : 0; - ret = smr_create(&smr_prov, &av->smr_map, &attr, &ep->region); + ret = smr_create(&smr_prov, ep->map, &attr, &ep->region); if (ret) return ret; diff --git a/prov/shm/src/smr_progress.c b/prov/shm/src/smr_progress.c index 9013205c2f5..bbdb4ab655d 100644 --- a/prov/shm/src/smr_progress.c +++ b/prov/shm/src/smr_progress.c @@ -535,16 +535,14 @@ static struct smr_pend_entry *smr_progress_ipc(struct smr_cmd *cmd, ssize_t hmem_copy_ret; struct ofi_mr_entry *mr_entry; struct smr_domain *domain; - struct smr_av *av; struct smr_pend_entry *ipc_entry; domain = container_of(ep->util_ep.domain, struct smr_domain, util_domain); - av = container_of(ep->util_ep.av, struct smr_av, util_av); if (cmd->data.ipc_info.iface == FI_HMEM_ZE) ze_set_pid_fd((void **) &cmd->data.ipc_info.ipc_handle, - av->smr_map.peers[cmd->hdr.id].pid_fd); + ep->map->peers[cmd->hdr.id].pid_fd); //TODO disable IPC if more than 1 interface is initialized ret = ofi_ipc_cache_search(domain->ipc_cache, cmd->hdr.id, @@ -864,14 +862,11 @@ int smr_unexp_start(struct fi_peer_rx_entry *rx_entry) static void smr_progress_connreq(struct smr_ep *ep, struct smr_cmd *cmd) { - struct smr_av *av; struct smr_region *peer_smr; int64_t idx = -1; int ret = 0; - av = container_of(ep->util_ep.av, struct smr_av, util_av); - ret = smr_map_add(&smr_prov, &av->smr_map, (char *) cmd->data.msg, - &idx); + ret = smr_map_add(&smr_prov, ep->map, (char *) cmd->data.msg, &idx); if (ret || idx < 0) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "Error processing mapping request\n"); @@ -880,9 +875,9 @@ static void smr_progress_connreq(struct smr_ep *ep, struct smr_cmd *cmd) peer_smr = smr_peer_region(ep, idx); if (!peer_smr) { - ofi_spin_lock(&av->smr_map.lock); - ret = smr_map_to_region(&smr_prov, &av->smr_map, idx); - ofi_spin_unlock(&av->smr_map.lock); + ofi_spin_lock(&ep->map->lock); + ret = smr_map_to_region(&smr_prov, ep->map, idx); + ofi_spin_unlock(&ep->map->lock); if (ret) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "Could not map peer region\n"); @@ -896,10 +891,10 @@ static void smr_progress_connreq(struct smr_ep *ep, struct smr_cmd *cmd) /* TODO track and update/complete in error any transfers * to or from old mapping */ - ofi_spin_lock(&av->smr_map.lock); - smr_unmap_region(&smr_prov, &av->smr_map, idx, false); - smr_map_to_region(&smr_prov, &av->smr_map, idx); - ofi_spin_unlock(&av->smr_map.lock); + ofi_spin_lock(&ep->map->lock); + smr_unmap_region(&smr_prov, ep->map, idx, false); + smr_map_to_region(&smr_prov, ep->map, idx); + ofi_spin_unlock(&ep->map->lock); peer_smr = smr_peer_region(ep, idx); } @@ -907,9 +902,10 @@ static void smr_progress_connreq(struct smr_ep *ep, struct smr_cmd *cmd) smr_peer_data(peer_smr)[cmd->hdr.id].id = idx; smr_peer_data(ep->region)[idx].id = cmd->hdr.id; - assert(av->smr_map.num_peers > 0); - ep->region->max_sar_buf_per_peer = MIN(SMR_BUF_BATCH_MAX, - SMR_MAX_PEERS / av->smr_map.num_peers); + assert(ep->map->num_peers > 0); + ep->region->max_sar_buf_per_peer = MIN( + SMR_BUF_BATCH_MAX, + SMR_MAX_PEERS / ep->map->num_peers); } static int smr_alloc_cmd_ctx(struct smr_ep *ep, @@ -984,15 +980,12 @@ static int smr_progress_cmd_msg(struct smr_ep *ep, struct smr_cmd *cmd) { struct fi_peer_match_attr attr; struct fi_peer_rx_entry *rx_entry; - struct smr_av *av; int ret; if (cmd->hdr.rx_ctx) return smr_progress_pending(ep, cmd); - av = container_of(ep->util_ep.av, struct smr_av, util_av); - - attr.addr = av->smr_map.peers[cmd->hdr.id].fiaddr; + attr.addr = ep->map->peers[cmd->hdr.id].fiaddr; attr.msg_size = 
cmd->hdr.size; attr.tag = cmd->hdr.tag; if (cmd->hdr.op == ofi_op_tagged) { diff --git a/prov/shm/src/smr_util.c b/prov/shm/src/smr_util.c index d26fe5ce347..53eb67d18b1 100644 --- a/prov/shm/src/smr_util.c +++ b/prov/shm/src/smr_util.c @@ -426,14 +426,11 @@ void smr_map_to_endpoint(struct smr_ep *ep, int64_t id) { int ret; struct smr_region *peer_smr; - struct smr_av *av; struct smr_peer_data *local_peers; - av = container_of(ep->util_ep.av, struct smr_av, util_av); - - assert(ofi_spin_held(&av->smr_map.lock)); + assert(ofi_spin_held(&ep->map->lock)); peer_smr = smr_peer_region(ep, id); - if (!av->smr_map.peers[id].id_assigned || !peer_smr) + if (!ep->map->peers[id].id_assigned || !peer_smr) return; local_peers = smr_peer_data(ep->region); @@ -454,7 +451,8 @@ void smr_map_to_endpoint(struct smr_ep *ep, int64_t id) return; } local_peers[id].xpmem.avail = true; - local_peers[id].xpmem.addr_max = peer_smr->xpmem_self.address_max; + local_peers[id].xpmem.addr_max = + peer_smr->xpmem_self.address_max; } else { local_peers[id].xpmem.avail = false; } @@ -465,7 +463,7 @@ void smr_map_to_endpoint(struct smr_ep *ep, int64_t id) } void smr_unmap_region(const struct fi_provider *prov, struct smr_map *map, - int64_t peer_id, bool local) + int64_t peer_id, bool local) { struct smr_region *peer_region; struct smr_peer *peer; @@ -512,12 +510,10 @@ void smr_unmap_region(const struct fi_provider *prov, struct smr_map *map, void smr_unmap_from_endpoint(struct smr_ep *ep, int64_t id) { struct smr_region *peer_smr; - struct smr_av *av; struct smr_peer_data *local_peers, *peer_peers; int64_t peer_id; - av = container_of(ep->util_ep.av, struct smr_av, util_av); - if (!av->smr_map.peers[id].id_assigned) + if (!ep->map->peers[id].id_assigned) return; peer_smr = smr_peer_region(ep, id); @@ -534,15 +530,13 @@ void smr_unmap_from_endpoint(struct smr_ep *ep, int64_t id) void smr_exchange_all_peers(struct smr_ep *ep) { - struct smr_av *av; int64_t i; - av = container_of(ep->util_ep.av, struct smr_av, util_av); - ofi_spin_lock(&av->smr_map.lock); + ofi_spin_lock(&ep->map->lock); for (i = 0; i < SMR_MAX_PEERS; i++) smr_map_to_endpoint(ep, i); - ofi_spin_unlock(&av->smr_map.lock); + ofi_spin_unlock(&ep->map->lock); } int smr_map_add(const struct fi_provider *prov, struct smr_map *map, From 4840a199478a4c8e4d44e1f6d8d6998733ffec59 Mon Sep 17 00:00:00 2001 From: Alexia Ingerson Date: Fri, 20 Dec 2024 12:31:31 -0800 Subject: [PATCH 07/13] prov/shm: remove map->lock and use util_av->lock instead There is a 1:1 relationship with the av and map so just reuse the util av lock for access to the map as well. 
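As a sketch of the new convention (illustrative only; the real changes follow in the diff, e.g. in smr_verify_peer), a map lookup that previously took the per-map spinlock now runs under the AV mutex:

	/* before: dedicated map lock */
	ofi_spin_lock(&map->lock);
	ret = smr_map_to_region(&smr_prov, map, id);
	ofi_spin_unlock(&map->lock);

	/* after: the av and map are 1:1, so the util AV lock
	 * serializes all map access
	 */
	ofi_mutex_lock(&ep->util_ep.av->lock);
	ret = smr_map_to_region(&smr_prov, ep->map, id);
	ofi_mutex_unlock(&ep->util_ep.av->lock);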
This requires some reorganizing of the locking semantics Signed-off-by: Alexia Ingerson --- prov/shm/src/smr.h | 5 ++--- prov/shm/src/smr_av.c | 36 +++++++++++++++-------------------- prov/shm/src/smr_ep.c | 4 ++-- prov/shm/src/smr_progress.c | 18 +++++++----------- prov/shm/src/smr_util.c | 38 +++++++++++++++++++------------------ 5 files changed, 46 insertions(+), 55 deletions(-) diff --git a/prov/shm/src/smr.h b/prov/shm/src/smr.h index 0404b2f7db8..74207d861ce 100644 --- a/prov/shm/src/smr.h +++ b/prov/shm/src/smr.h @@ -231,7 +231,6 @@ struct smr_peer { #define SMR_MAX_PEERS 256 struct smr_map { - ofi_spin_t lock; int64_t cur_id; int num_peers; uint16_t flags; @@ -388,8 +387,8 @@ void smr_unmap_region(const struct fi_provider *prov, struct smr_map *map, int64_t id, bool found); void smr_unmap_from_endpoint(struct smr_ep *ep, int64_t id); void smr_exchange_all_peers(struct smr_ep *ep); -int smr_map_add(const struct fi_provider *prov, struct smr_map *map, - const char *name, int64_t *id); +void smr_map_add(const struct fi_provider *prov, struct smr_map *map, + const char *name, int64_t *id); void smr_map_del(struct smr_map *map, int64_t id); struct smr_region *smr_map_get(struct smr_map *map, int64_t id); diff --git a/prov/shm/src/smr_av.c b/prov/shm/src/smr_av.c index 2d2fcb83d23..df775e08253 100644 --- a/prov/shm/src/smr_av.c +++ b/prov/shm/src/smr_av.c @@ -55,36 +55,36 @@ static int smr_map_init(const struct fi_provider *prov, struct smr_map *map, map->flags = flags; ofi_rbmap_init(&map->rbmap, smr_name_compare); - ofi_spin_init(&map->lock); return 0; } -static void smr_map_cleanup(struct smr_map *map) +static void smr_map_cleanup(struct smr_av *av) { int64_t i; + ofi_mutex_lock(&av->util_av.lock); for (i = 0; i < SMR_MAX_PEERS; i++) { - if (map->peers[i].id_assigned) - smr_map_del(map, i); + if (av->smr_map.peers[i].id_assigned) + smr_map_del(&av->smr_map, i); } - ofi_rbmap_cleanup(&map->rbmap); + ofi_rbmap_cleanup(&av->smr_map.rbmap); + ofi_mutex_unlock(&av->util_av.lock); } static int smr_av_close(struct fid *fid) { + struct smr_av *av; int ret; - struct util_av *av; - struct smr_av *smr_av; - av = container_of(fid, struct util_av, av_fid); - smr_av = container_of(av, struct smr_av, util_av); + av = container_of(fid, struct smr_av, util_av.av_fid); - ret = ofi_av_close(av); + smr_map_cleanup(av); + + ret = ofi_av_close(&av->util_av); if (ret) return ret; - smr_map_cleanup(&smr_av->smr_map); free(av); return 0; } @@ -116,19 +116,14 @@ static int smr_av_insert(struct fid_av *av_fid, const void *addr, size_t count, util_av = container_of(av_fid, struct util_av, av_fid); smr_av = container_of(util_av, struct smr_av, util_av); + ofi_mutex_lock(&util_av->lock); for (i = 0; i < count; i++, addr = (char *) addr + strlen(addr) + 1) { FI_INFO(&smr_prov, FI_LOG_AV, "%s\n", (const char *) addr); util_addr = FI_ADDR_NOTAVAIL; if (smr_av->used < SMR_MAX_PEERS) { - ret = smr_map_add(&smr_prov, &smr_av->smr_map, - addr, &shm_id); - if (!ret) { - ofi_mutex_lock(&util_av->lock); - ret = ofi_av_insert_addr(util_av, &shm_id, - &util_addr); - ofi_mutex_unlock(&util_av->lock); - } + smr_map_add(&smr_prov, &smr_av->smr_map, addr, &shm_id); + ret = ofi_av_insert_addr(util_av, &shm_id, &util_addr); } else { FI_WARN(&smr_prov, FI_LOG_AV, "AV insert failed. 
The maximum number of AV " @@ -171,8 +166,8 @@ static int smr_av_insert(struct fid_av *av_fid, const void *addr, size_t count, smr_ep->srx->owner_ops->foreach_unspec_addr(smr_ep->srx, &smr_get_addr); } - } + ofi_mutex_unlock(&util_av->lock); return succ_count; } @@ -217,7 +212,6 @@ static int smr_av_remove(struct fid_av *av_fid, fi_addr_t *fi_addr, } smr_av->used--; } - ofi_mutex_unlock(&util_av->lock); return ret; } diff --git a/prov/shm/src/smr_ep.c b/prov/shm/src/smr_ep.c index d5d364a50f9..636d7ec6ae4 100644 --- a/prov/shm/src/smr_ep.c +++ b/prov/shm/src/smr_ep.c @@ -202,9 +202,9 @@ int64_t smr_verify_peer(struct smr_ep *ep, fi_addr_t fi_addr) return id; if (!ep->map->peers[id].region) { - ofi_spin_lock(&ep->map->lock); + ofi_mutex_lock(&ep->util_ep.av->lock); ret = smr_map_to_region(&smr_prov, ep->map, id); - ofi_spin_unlock(&ep->map->lock); + ofi_mutex_unlock(&ep->util_ep.av->lock); if (ret) return -1; } diff --git a/prov/shm/src/smr_progress.c b/prov/shm/src/smr_progress.c index bbdb4ab655d..59b5f291fdf 100644 --- a/prov/shm/src/smr_progress.c +++ b/prov/shm/src/smr_progress.c @@ -866,22 +866,16 @@ static void smr_progress_connreq(struct smr_ep *ep, struct smr_cmd *cmd) int64_t idx = -1; int ret = 0; - ret = smr_map_add(&smr_prov, ep->map, (char *) cmd->data.msg, &idx); - if (ret || idx < 0) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "Error processing mapping request\n"); - return; - } + ofi_mutex_lock(&ep->util_ep.av->lock); + smr_map_add(&smr_prov, ep->map, (char *) cmd->data.msg, &idx); peer_smr = smr_peer_region(ep, idx); if (!peer_smr) { - ofi_spin_lock(&ep->map->lock); ret = smr_map_to_region(&smr_prov, ep->map, idx); - ofi_spin_unlock(&ep->map->lock); if (ret) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "Could not map peer region\n"); - return; + goto out; } peer_smr = smr_peer_region(ep, idx); } @@ -891,10 +885,10 @@ static void smr_progress_connreq(struct smr_ep *ep, struct smr_cmd *cmd) /* TODO track and update/complete in error any transfers * to or from old mapping */ - ofi_spin_lock(&ep->map->lock); + ofi_mutex_lock(&ep->util_ep.av->lock); smr_unmap_region(&smr_prov, ep->map, idx, false); smr_map_to_region(&smr_prov, ep->map, idx); - ofi_spin_unlock(&ep->map->lock); + ofi_mutex_unlock(&ep->util_ep.av->lock); peer_smr = smr_peer_region(ep, idx); } @@ -906,6 +900,8 @@ static void smr_progress_connreq(struct smr_ep *ep, struct smr_cmd *cmd) ep->region->max_sar_buf_per_peer = MIN( SMR_BUF_BATCH_MAX, SMR_MAX_PEERS / ep->map->num_peers); +out: + ofi_mutex_unlock(&ep->util_ep.av->lock); } static int smr_alloc_cmd_ctx(struct smr_ep *ep, diff --git a/prov/shm/src/smr_util.c b/prov/shm/src/smr_util.c index 53eb67d18b1..296eda76633 100644 --- a/prov/shm/src/smr_util.c +++ b/prov/shm/src/smr_util.c @@ -332,7 +332,7 @@ int smr_map_to_region(const struct fi_provider *prov, struct smr_map *map, struct smr_region *peer; struct util_ep *util_ep; struct smr_ep *smr_ep; - struct smr_av *av; + struct smr_av *av = container_of(map, struct smr_av, smr_map); size_t size; int fd, ret = 0; struct stat sts; @@ -353,7 +353,7 @@ int smr_map_to_region(const struct fi_provider *prov, struct smr_map *map, if (peer_buf->region) return FI_SUCCESS; - assert(ofi_spin_held(&map->lock)); + assert(ofi_mutex_held(&av->util_av.lock)); fd = shm_open(name, O_RDWR, S_IRUSR | S_IWUSR); if (fd < 0) { FI_WARN_ONCE(prov, FI_LOG_AV, @@ -410,7 +410,6 @@ int smr_map_to_region(const struct fi_provider *prov, struct smr_map *map, } } - av = container_of(map, struct smr_av, smr_map); dlist_foreach_container(&av->util_av.ep_list, 
struct util_ep, util_ep, av_entry) { smr_ep = container_of(util_ep, struct smr_ep, util_ep); @@ -428,7 +427,8 @@ void smr_map_to_endpoint(struct smr_ep *ep, int64_t id) struct smr_region *peer_smr; struct smr_peer_data *local_peers; - assert(ofi_spin_held(&ep->map->lock)); + assert(ofi_mutex_held(&container_of(ep->util_ep.av, struct smr_av, + util_av)->util_av.lock)); peer_smr = smr_peer_region(ep, id); if (!ep->map->peers[id].id_assigned || !peer_smr) return; @@ -472,13 +472,14 @@ void smr_unmap_region(const struct fi_provider *prov, struct smr_map *map, struct smr_av *av; int ret = 0; - assert(ofi_spin_held(&map->lock)); + av = container_of(map, struct smr_av, smr_map); + + assert(ofi_mutex_held(&av->util_av.lock)); peer_region = map->peers[peer_id].region; if (!peer_region) return; peer = &map->peers[peer_id]; - av = container_of(map, struct smr_av, smr_map); dlist_foreach_container(&av->util_av.ep_list, struct util_ep, util_ep, av_entry) { smr_ep = container_of(util_ep, struct smr_ep, util_ep); @@ -532,26 +533,28 @@ void smr_exchange_all_peers(struct smr_ep *ep) { int64_t i; - ofi_spin_lock(&ep->map->lock); + ofi_mutex_lock(&ep->util_ep.av->lock); for (i = 0; i < SMR_MAX_PEERS; i++) smr_map_to_endpoint(ep, i); - ofi_spin_unlock(&ep->map->lock); + ofi_mutex_unlock(&ep->util_ep.av->lock); } -int smr_map_add(const struct fi_provider *prov, struct smr_map *map, - const char *name, int64_t *id) +void smr_map_add(const struct fi_provider *prov, struct smr_map *map, + const char *name, int64_t *id) { struct ofi_rbnode *node; int tries = 0, ret = 0; - ofi_spin_lock(&map->lock); + assert(ofi_mutex_held(&container_of(map, struct smr_av, + smr_map)->util_av.lock)); + ret = ofi_rbmap_insert(&map->rbmap, (void *) name, (void *) (intptr_t) *id, &node); if (ret) { assert(ret == -FI_EALREADY); *id = (intptr_t) node->data; - goto out; + return; } while (map->peers[map->cur_id].id_assigned && tries < SMR_MAX_PEERS) { @@ -570,10 +573,6 @@ int smr_map_add(const struct fi_provider *prov, struct smr_map *map, map->peers[*id].region = NULL; map->num_peers++; map->peers[*id].id_assigned = true; - -out: - ofi_spin_unlock(&map->lock); - return FI_SUCCESS; } void smr_map_del(struct smr_map *map, int64_t id) @@ -581,6 +580,9 @@ void smr_map_del(struct smr_map *map, int64_t id) struct smr_ep_name *name; bool local = false; + assert(ofi_mutex_held(&container_of(map, struct smr_av, + smr_map)->util_av.lock)); + assert(id >= 0 && id < SMR_MAX_PEERS); pthread_mutex_lock(&ep_list_lock); dlist_foreach_container(&ep_name_list, struct smr_ep_name, name, @@ -591,13 +593,13 @@ void smr_map_del(struct smr_map *map, int64_t id) } } pthread_mutex_unlock(&ep_list_lock); - ofi_spin_lock(&map->lock); + + smr_unmap_region(&smr_prov, map, id, local); map->peers[id].fiaddr = FI_ADDR_NOTAVAIL; map->peers[id].id_assigned = false; map->num_peers--; ofi_rbmap_find_delete(&map->rbmap, map->peers[id].name); - ofi_spin_unlock(&map->lock); } struct smr_region *smr_map_get(struct smr_map *map, int64_t id) From 3141b65433320ad051064f9b7d1dc25793b8877f Mon Sep 17 00:00:00 2001 From: Alexia Ingerson Date: Fri, 20 Dec 2024 14:19:24 -0800 Subject: [PATCH 08/13] prov/shm: remove smr_fabric There is nothing in smr_fabric, just use the util_fabric directly Signed-off-by: Alexia Ingerson --- prov/shm/src/smr.h | 4 ---- prov/shm/src/smr_domain.c | 7 ++----- prov/shm/src/smr_fabric.c | 14 ++++++++------ 3 files changed, 10 insertions(+), 15 deletions(-) diff --git a/prov/shm/src/smr.h b/prov/shm/src/smr.h index 74207d861ce..5d40139b250 100644 --- 
a/prov/shm/src/smr.h +++ b/prov/shm/src/smr.h @@ -518,10 +518,6 @@ struct smr_cmd_ctx { OFI_DECLARE_FREESTACK(struct smr_tx_entry, smr_tx_fs); -struct smr_fabric { - struct util_fabric util_fabric; -}; - struct smr_domain { struct util_domain util_domain; int fast_rma; diff --git a/prov/shm/src/smr_domain.c b/prov/shm/src/smr_domain.c index 72a3be5c40a..2a0cf82894a 100644 --- a/prov/shm/src/smr_domain.c +++ b/prov/shm/src/smr_domain.c @@ -157,7 +157,6 @@ int smr_domain_open(struct fid_fabric *fabric, struct fi_info *info, { int ret; struct smr_domain *smr_domain; - struct smr_fabric *smr_fabric; ret = ofi_prov_check_info(&smr_util_prov, fabric->api_version, info); if (ret) @@ -174,12 +173,10 @@ int smr_domain_open(struct fid_fabric *fabric, struct fi_info *info, return ret; } - smr_fabric = container_of(fabric, struct smr_fabric, - util_fabric.fabric_fid); - ofi_mutex_lock(&smr_fabric->util_fabric.lock); + ofi_mutex_lock(&smr_domain->util_domain.fabric->lock); smr_domain->fast_rma = smr_fast_rma_enabled(info->domain_attr->mr_mode, info->tx_attr->msg_order); - ofi_mutex_unlock(&smr_fabric->util_fabric.lock); + ofi_mutex_unlock(&smr_domain->util_domain.fabric->lock); ret = ofi_ipc_cache_open(&smr_domain->ipc_cache, &smr_domain->util_domain); diff --git a/prov/shm/src/smr_fabric.c b/prov/shm/src/smr_fabric.c index fab9b2b583f..910b5789dd4 100644 --- a/prov/shm/src/smr_fabric.c +++ b/prov/shm/src/smr_fabric.c @@ -60,10 +60,12 @@ static int smr_fabric_close(fid_t fid) { int ret; struct util_fabric *fabric; + fabric = container_of(fid, struct util_fabric, fabric_fid.fid); ret = ofi_fabric_close(fabric); if (ret) return ret; + free(fabric); return 0; } @@ -80,20 +82,20 @@ int smr_fabric(struct fi_fabric_attr *attr, struct fid_fabric **fabric, void *context) { int ret; - struct smr_fabric *smr_fabric; + struct util_fabric *util_fabric; - smr_fabric = calloc(1, sizeof(*smr_fabric)); - if (!smr_fabric) + util_fabric = calloc(1, sizeof(*util_fabric)); + if (!util_fabric) return -FI_ENOMEM; ret = ofi_fabric_init(&smr_prov, smr_info.fabric_attr, attr, - &smr_fabric->util_fabric, context); + util_fabric, context); if (ret) { - free(smr_fabric); + free(util_fabric); return ret; } - *fabric = &smr_fabric->util_fabric.fabric_fid; + *fabric = &util_fabric->fabric_fid; (*fabric)->fid.ops = &smr_fabric_fi_ops; (*fabric)->ops = &smr_fabric_ops; return 0; From 3b57cd248e2c27b161919175c874ab13de1d1b94 Mon Sep 17 00:00:00 2001 From: Alexia Ingerson Date: Fri, 20 Dec 2024 14:24:34 -0800 Subject: [PATCH 09/13] prov/shm: refactor progress functions into function pointers Just like on the send side, make the progress functions be an array of function pointers accessible by the command proto. 
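Condensed from the diff below, the receive side now registers one handler per protocol and dispatches on the command header rather than switching in each caller:

	smr_progress_func smr_progress_ops[smr_proto_max] = {
		[smr_proto_inline] = &smr_progress_inline,
		[smr_proto_inject] = &smr_progress_inject,
		[smr_proto_iov] = &smr_progress_iov,
		[smr_proto_sar] = &smr_progress_sar,
		[smr_proto_ipc] = &smr_progress_ipc,
	};

	/* callers collapse their per-protocol switch statements to: */
	assert(cmd->hdr.proto < smr_proto_max);
	ret = smr_progress_ops[cmd->hdr.proto](ep, cmd, rx_entry,
					       (struct ofi_mr **) rx_entry->desc,
					       rx_entry->iov, rx_entry->count);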
This cleans up the parameters of the progress calls and streamlines the calls. This also renames the proto_ops to send_ops to make the distinction between the two sets of handlers clearer. Signed-off-by: Alexia Ingerson --- prov/shm/src/smr.h | 10 +- prov/shm/src/smr_comp.c | 4 +- prov/shm/src/smr_ep.c | 2 +- prov/shm/src/smr_msg.c | 10 +- prov/shm/src/smr_progress.c | 269 ++++++++++++------------------------ prov/shm/src/smr_rma.c | 10 +- 6 files changed, 110 insertions(+), 195 deletions(-) diff --git a/prov/shm/src/smr.h b/prov/shm/src/smr.h index 5d40139b250..61a044b3840 100644 --- a/prov/shm/src/smr.h +++ b/prov/shm/src/smr.h @@ -587,13 +587,13 @@ size_t smr_copy_from_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, int smr_select_proto(void **desc, size_t iov_count, bool cma_avail, bool ipc_valid, uint32_t op, uint64_t total_len, uint64_t op_flags); -typedef ssize_t (*smr_proto_func)( +typedef ssize_t (*smr_send_func)( struct smr_ep *ep, struct smr_region *peer_smr, int64_t id, int64_t peer_id, uint32_t op, uint64_t tag, uint64_t data, uint64_t op_flags, struct ofi_mr **desc, const struct iovec *iov, size_t iov_count, size_t total_len, void *context, struct smr_cmd *cmd); -extern smr_proto_func smr_proto_ops[smr_proto_max]; +extern smr_send_func smr_send_ops[smr_proto_max]; int smr_write_err_comp(struct util_cq *cq, void *context, uint64_t flags, uint64_t tag, int err); @@ -603,6 +603,12 @@ int smr_complete_rx(struct smr_ep *ep, void *context, uint32_t op, uint64_t flags, size_t len, void *buf, int64_t id, uint64_t tag, uint64_t data); +typedef ssize_t (*smr_progress_func)( + struct smr_ep *ep, struct smr_cmd *cmd, + struct fi_peer_rx_entry *rx_entry, struct ofi_mr **mr, + struct iovec *iov, size_t iov_count); +extern smr_progress_func smr_progress_ops[smr_proto_max]; + static inline uint64_t smr_rx_cq_flags(uint64_t rx_flags, uint16_t op_flags) { if (op_flags & SMR_REMOTE_CQ_DATA) diff --git a/prov/shm/src/smr_comp.c b/prov/shm/src/smr_comp.c index 62d9a4d26ed..2ea5c1314de 100644 --- a/prov/shm/src/smr_comp.c +++ b/prov/shm/src/smr_comp.c @@ -54,8 +54,8 @@ int smr_write_err_comp(struct util_cq *cq, void *context, err_entry.op_context = context; err_entry.flags = flags; err_entry.tag = tag; - err_entry.err = err; - err_entry.prov_errno = -err; + err_entry.err = -err; + err_entry.prov_errno = err; return ofi_peer_cq_write_error(cq, &err_entry); } diff --git a/prov/shm/src/smr_ep.c b/prov/shm/src/smr_ep.c index 636d7ec6ae4..6c473fa3991 100644 --- a/prov/shm/src/smr_ep.c +++ b/prov/shm/src/smr_ep.c @@ -601,7 +601,7 @@ static ssize_t smr_do_ipc(struct smr_ep *ep, struct smr_region *peer_smr, return FI_SUCCESS; } -smr_proto_func smr_proto_ops[smr_proto_max] = { +smr_send_func smr_send_ops[smr_proto_max] = { [smr_proto_inline] = &smr_do_inline, [smr_proto_inject] = &smr_do_inject, [smr_proto_iov] = &smr_do_iov, diff --git a/prov/shm/src/smr_msg.c b/prov/shm/src/smr_msg.c index 4994d98824c..e46c284ce59 100644 --- a/prov/shm/src/smr_msg.c +++ b/prov/shm/src/smr_msg.c @@ -126,9 +126,9 @@ static ssize_t smr_generic_sendmsg(struct smr_ep *ep, const struct iovec *iov, ce->ptr = smr_peer_to_peer(ep, id, (uintptr_t) &ce->cmd); } - ret = smr_proto_ops[proto](ep, peer_smr, id, peer_id, op, tag, data, - op_flags, (struct ofi_mr **) desc, iov, - iov_count, total_len, context, cmd); + ret = smr_send_ops[proto](ep, peer_smr, id, peer_id, op, tag, data, + op_flags, (struct ofi_mr **) desc, iov, + iov_count, total_len, context, cmd); if (ret) { smr_cmd_queue_discard(ce, pos); if (proto != smr_proto_inline) @@ -249,8 +249,8 @@ static 
ssize_t smr_generic_inject(struct fid_ep *ep_fid, const void *buf, ce->ptr = smr_local_to_peer(ep, id, peer_id, (uintptr_t) cmd); } - ret = smr_proto_ops[proto](ep, peer_smr, id, peer_id, op, tag, data, - op_flags, NULL, &msg_iov, 1, len, NULL, cmd); + ret = smr_send_ops[proto](ep, peer_smr, id, peer_id, op, tag, data, + op_flags, NULL, &msg_iov, 1, len, NULL, cmd); if (ret) { smr_cmd_queue_discard(ce, pos); ret = -FI_EAGAIN; diff --git a/prov/shm/src/smr_progress.c b/prov/shm/src/smr_progress.c index 59b5f291fdf..63ddaaa5d32 100644 --- a/prov/shm/src/smr_progress.c +++ b/prov/shm/src/smr_progress.c @@ -214,77 +214,66 @@ static void smr_progress_return(struct smr_ep *ep) ofi_genlock_unlock(&ep->util_ep.lock); } -static int smr_progress_inline(struct smr_cmd *cmd, struct ofi_mr **mr, - struct iovec *iov, size_t iov_count, - size_t *total_len) +ssize_t smr_progress_inline(struct smr_ep *ep, struct smr_cmd *cmd, + struct fi_peer_rx_entry *rx_entry, + struct ofi_mr **mr, struct iovec *iov, + size_t iov_count) { - ssize_t hmem_copy_ret; + ssize_t ret; - hmem_copy_ret = ofi_copy_to_mr_iov(mr, iov, iov_count, 0, - cmd->data.msg, cmd->hdr.size); - if (hmem_copy_ret < 0) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "inline recv failed with code %d\n", - (int)(-hmem_copy_ret)); - return hmem_copy_ret; - } else if (hmem_copy_ret != cmd->hdr.size) { + ret = ofi_copy_to_mr_iov(mr, iov, iov_count, 0, cmd->data.msg, + cmd->hdr.size); + if (ret < 0) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "inline recv truncated\n"); + "inline recv failed with code %d\n", (int)(-ret)); + return ret; + } + if (ret != cmd->hdr.size) { + FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "inline recv truncated\n"); return -FI_ETRUNC; } - - *total_len = hmem_copy_ret; - return FI_SUCCESS; } -static int smr_progress_inject(struct smr_cmd *cmd, struct ofi_mr **mr, - struct iovec *iov, size_t iov_count, - size_t *total_len, struct smr_ep *ep, int err) +ssize_t smr_progress_inject(struct smr_ep *ep, struct smr_cmd *cmd, + struct fi_peer_rx_entry *rx_entry, + struct ofi_mr **mr, struct iovec *iov, + size_t iov_count) { struct smr_region *peer_smr; struct smr_inject_buf *tx_buf; - size_t inj_offset; - ssize_t hmem_copy_ret; - - inj_offset = (size_t) cmd->hdr.proto_data; + ssize_t ret; peer_smr = smr_peer_region(ep, cmd->hdr.id); - tx_buf = smr_get_ptr(peer_smr, inj_offset); - if (err) - goto out; + tx_buf = smr_get_ptr(peer_smr, (size_t) cmd->hdr.proto_data); if (cmd->hdr.op == ofi_op_read_req) { - hmem_copy_ret = ofi_copy_from_mr_iov(tx_buf->data, - cmd->hdr.size, mr, iov, - iov_count, 0); + ret = ofi_copy_from_mr_iov(tx_buf->data, cmd->hdr.size, mr, + iov, iov_count, 0); } else { - hmem_copy_ret = ofi_copy_to_mr_iov(mr, iov, iov_count, 0, - tx_buf->data, cmd->hdr.size); + ret = ofi_copy_to_mr_iov(mr, iov, iov_count, 0, tx_buf->data, + cmd->hdr.size); } - if (hmem_copy_ret < 0) { + if (ret < 0) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "inject recv failed with code %d\n", - (int)(-hmem_copy_ret)); - err = hmem_copy_ret; - } else if (hmem_copy_ret != cmd->hdr.size) { + "inject recv failed with code %lu\n", ret); + } else if (ret != cmd->hdr.size) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "inject recv truncated\n"); - err = -FI_ETRUNC; + ret = -FI_ETRUNC; } else { - *total_len = hmem_copy_ret; - err = FI_SUCCESS; + ret = FI_SUCCESS; } -out: + cmd->hdr.status = ret; smr_return_cmd(ep, cmd); - return err; + return ret; } -static int smr_progress_iov(struct smr_cmd *cmd, struct iovec *iov, - size_t iov_count, size_t *total_len, - struct smr_ep *ep) +ssize_t 
smr_progress_iov(struct smr_ep *ep, struct smr_cmd *cmd, + struct fi_peer_rx_entry *rx_entry, struct ofi_mr **mr, + struct iovec *iov, size_t iov_count) { struct smr_region *peer_smr; struct ofi_xpmem_client *xpmem; @@ -298,12 +287,8 @@ static int smr_progress_iov(struct smr_cmd *cmd, struct iovec *iov, cmd->data.iov_count, cmd->hdr.size, peer_smr->pid, cmd->hdr.op == ofi_op_read_req, xpmem); - if (!ret) - *total_len = cmd->hdr.size; - cmd->hdr.status = ret; smr_return_cmd(ep, cmd); - return ret; } @@ -418,16 +403,15 @@ static int smr_progress_pending(struct smr_ep *ep, struct smr_cmd *cmd) } } -static struct smr_pend_entry *smr_progress_sar(struct smr_cmd *cmd, - struct fi_peer_rx_entry *rx_entry, struct ofi_mr **mr, - struct iovec *iov, size_t iov_count, - size_t *total_len, struct smr_ep *ep) +ssize_t smr_progress_sar(struct smr_ep *ep, struct smr_cmd *cmd, + struct fi_peer_rx_entry *rx_entry, struct ofi_mr **mr, + struct iovec *iov, size_t iov_count) { struct smr_region *peer_smr; struct smr_pend_entry *sar_entry = NULL; struct iovec sar_iov[SMR_IOV_LIMIT]; size_t bytes_done = 0; - int ret; + ssize_t ret = FI_SUCCESS; peer_smr = smr_peer_region(ep, cmd->hdr.id); @@ -444,8 +428,7 @@ static struct smr_pend_entry *smr_progress_sar(struct smr_cmd *cmd, else ret = smr_copy_from_sar(ep, smr_sar_pool(peer_smr), cmd, mr, sar_iov, iov_count, &bytes_done); - if (ret) - cmd->hdr.status = ret; + cmd->hdr.status = ret; if (bytes_done == cmd->hdr.size) goto out; @@ -462,18 +445,15 @@ static struct smr_pend_entry *smr_progress_sar(struct smr_cmd *cmd, memcpy(sar_entry->mr, mr, sizeof(*mr) * iov_count); else memset(sar_entry->mr, 0, sizeof(*mr) * iov_count); - - *total_len = cmd->hdr.size; out: smr_return_cmd(ep, cmd); - return sar_entry; + return ret; } -static int smr_ipc_async_copy(struct smr_ep *ep, void *ptr, +static int smr_ipc_async_copy(struct smr_ep *ep, struct smr_cmd *cmd, struct fi_peer_rx_entry *rx_entry, - struct iovec *iov, size_t iov_count, struct ofi_mr_entry *mr_entry, - struct smr_cmd *cmd, struct smr_pend_entry **pend) + struct iovec *iov, size_t iov_count, void *ptr) { struct smr_pend_entry *ipc_entry; enum fi_hmem_iface iface = cmd->data.ipc_info.iface; @@ -484,16 +464,13 @@ static int smr_ipc_async_copy(struct smr_ep *ep, void *ptr, if (!ipc_entry) return -FI_ENOMEM; + cmd->hdr.rx_ctx = (uintptr_t) ipc_entry; ipc_entry->ipc_entry = mr_entry; ipc_entry->bytes_done = 0; ipc_entry->cmd = cmd; memcpy(ipc_entry->iov, iov, sizeof(*iov) * iov_count); ipc_entry->iov_count = iov_count; ipc_entry->rx_entry = rx_entry; - if (rx_entry) { - if (cmd->hdr.op_flags & SMR_REMOTE_CQ_DATA) - ipc_entry->rx_entry->flags |= FI_REMOTE_CQ_DATA; - } ret = ofi_create_async_copy_event(iface, device, &ipc_entry->async_event); @@ -510,13 +487,10 @@ static int smr_ipc_async_copy(struct smr_ep *ep, void *ptr, 0, ptr, cmd->hdr.size, ipc_entry->async_event); } - if (ret < 0) goto fail; dlist_insert_tail(&ipc_entry->entry, &ep->ipc_cpy_pend_list); - *pend = ipc_entry; - return FI_SUCCESS; fail: @@ -524,18 +498,14 @@ static int smr_ipc_async_copy(struct smr_ep *ep, void *ptr, return ret; } -static struct smr_pend_entry *smr_progress_ipc(struct smr_cmd *cmd, - struct fi_peer_rx_entry *rx_entry, - struct ofi_mr **mr, struct iovec *iov, - size_t iov_count, size_t *total_len, - struct smr_ep *ep, int *err) +ssize_t smr_progress_ipc(struct smr_ep *ep, struct smr_cmd *cmd, + struct fi_peer_rx_entry *rx_entry, struct ofi_mr **mr, + struct iovec *iov, size_t iov_count) { void *ptr; int ret; - ssize_t hmem_copy_ret; 
struct ofi_mr_entry *mr_entry; struct smr_domain *domain; - struct smr_pend_entry *ipc_entry; domain = container_of(ep->util_ep.domain, struct smr_domain, util_domain); @@ -554,48 +524,44 @@ static struct smr_pend_entry *smr_progress_ipc(struct smr_cmd *cmd, (uintptr_t) cmd->data.ipc_info.offset; if (cmd->data.ipc_info.iface == FI_HMEM_ROCR) { - *total_len = 0; - ipc_entry = NULL; - - ret = smr_ipc_async_copy(ep, (char*)ptr, rx_entry, iov, - iov_count, mr_entry, cmd, - &ipc_entry); - if (ret) { - cmd->hdr.status = ret; + ret = smr_ipc_async_copy(ep, cmd, rx_entry, mr_entry, iov, + iov_count, ptr); + if (ret) goto uncache; - } - - return ipc_entry; + return FI_SUCCESS; } if (cmd->hdr.op == ofi_op_read_req) { - hmem_copy_ret = ofi_copy_from_hmem_iov(ptr, cmd->hdr.size, - cmd->data.ipc_info.iface, - cmd->data.ipc_info.device, iov, - iov_count, 0); + ret = ofi_copy_from_hmem_iov(ptr, cmd->hdr.size, + cmd->data.ipc_info.iface, + cmd->data.ipc_info.device, iov, + iov_count, 0); } else { - hmem_copy_ret = ofi_copy_to_hmem_iov(cmd->data.ipc_info.iface, - cmd->data.ipc_info.device, iov, - iov_count, 0, ptr, cmd->hdr.size); + ret = ofi_copy_to_hmem_iov(cmd->data.ipc_info.iface, + cmd->data.ipc_info.device, iov, + iov_count, 0, ptr, cmd->hdr.size); } - if (hmem_copy_ret < 0) - *err = hmem_copy_ret; - else if (hmem_copy_ret != cmd->hdr.size) - *err = -FI_ETRUNC; - else - *err = FI_SUCCESS; - - *total_len = hmem_copy_ret; - + if (ret == cmd->hdr.size) + ret = FI_SUCCESS; + else if (ret > 0) + ret = -FI_ETRUNC; uncache: ofi_mr_cache_delete(domain->ipc_cache, mr_entry); out: cmd->hdr.status = ret; smr_return_cmd(ep, cmd); - return NULL; + return ret; } +smr_progress_func smr_progress_ops[smr_proto_max] = { + [smr_proto_inline] = &smr_progress_inline, + [smr_proto_inject] = &smr_progress_inject, + [smr_proto_iov] = &smr_progress_iov, + [smr_proto_sar] = &smr_progress_sar, + [smr_proto_ipc] = &smr_progress_ipc, +}; + static void smr_do_atomic(struct smr_cmd *cmd, void *src, struct ofi_mr *dst_mr, void *dst, void *cmp, enum fi_datatype datatype, enum fi_op op, size_t cnt, uint16_t flags) @@ -711,63 +677,33 @@ static int smr_progress_inject_atomic(struct smr_cmd *cmd, struct ofi_mr **mr, static int smr_start_common(struct smr_ep *ep, struct smr_cmd *cmd, struct fi_peer_rx_entry *rx_entry) { - struct smr_pend_entry *pend = NULL; - size_t total_len = 0; uint64_t comp_flags; void *comp_buf; int ret; - int err = 0; - switch (cmd->hdr.proto) { - case smr_proto_inline: - err = smr_progress_inline(cmd, - (struct ofi_mr **) rx_entry->desc, - rx_entry->iov, rx_entry->count, &total_len); - break; - case smr_proto_inject: - err = smr_progress_inject(cmd, - (struct ofi_mr **) rx_entry->desc, - rx_entry->iov, rx_entry->count, &total_len, - ep, 0); - break; - case smr_proto_iov: - err = smr_progress_iov(cmd, rx_entry->iov, rx_entry->count, - &total_len, ep); - break; - case smr_proto_sar: - pend = smr_progress_sar(cmd, rx_entry, - (struct ofi_mr **) rx_entry->desc, - rx_entry->iov, rx_entry->count, - &total_len, ep); - break; - case smr_proto_ipc: - pend = smr_progress_ipc(cmd, rx_entry, - (struct ofi_mr **) rx_entry->desc, - rx_entry->iov, rx_entry->count, - &total_len, ep, &err); - break; - default: - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "unidentified operation type\n"); - err = -FI_EINVAL; - } + assert (cmd->hdr.proto < smr_proto_max); + ret = smr_progress_ops[cmd->hdr.proto]( + ep, cmd, rx_entry, + (struct ofi_mr **) rx_entry->desc, + rx_entry->iov, rx_entry->count); - if (!pend) { + if (!cmd->hdr.rx_ctx) { comp_buf = 
rx_entry->iov[0].iov_base; comp_flags = smr_rx_cq_flags(rx_entry->flags, cmd->hdr.op_flags); - if (err) { + if (ret) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "error processing op\n"); ret = smr_write_err_comp(ep->util_ep.rx_cq, rx_entry->context, comp_flags, rx_entry->tag, - -err); + ret); } else { ret = smr_complete_rx(ep, rx_entry->context, cmd->hdr.op, comp_flags, - total_len, comp_buf, cmd->hdr.id, - cmd->hdr.tag, cmd->hdr.cq_data); + cmd->hdr.size, comp_buf, + cmd->hdr.id, cmd->hdr.tag, + cmd->hdr.cq_data); } if (ret) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, @@ -1032,8 +968,8 @@ static int smr_progress_cmd_rma(struct smr_ep *ep, struct smr_cmd *cmd) struct smr_domain *domain; struct iovec iov[SMR_IOV_LIMIT]; struct fi_rma_iov *rma_iov; - size_t iov_count, total_len = 0; - int err = 0, ret = 0; + size_t iov_count; + int ret = 0; struct ofi_mr *mr[SMR_IOV_LIMIT]; if (cmd->hdr.rx_ctx) @@ -1061,46 +997,19 @@ static int smr_progress_cmd_rma(struct smr_ep *ep, struct smr_cmd *cmd) if (ret) goto out; - switch (cmd->hdr.proto) { - case smr_proto_inline: - err = smr_progress_inline(cmd, mr, iov, iov_count, &total_len); - break; - case smr_proto_inject: - err = smr_progress_inject(cmd, mr, iov, iov_count, &total_len, - ep, ret); - if (cmd->hdr.op == ofi_op_read_req && cmd->hdr.cq_data) - cmd->hdr.status = err; - break; - case smr_proto_iov: - err = smr_progress_iov(cmd, iov, iov_count, &total_len, ep); - break; - case smr_proto_sar: - if (smr_progress_sar(cmd, NULL, mr, iov, iov_count, &total_len, - ep)) - return ret; - break; - case smr_proto_ipc: - if (smr_progress_ipc(cmd, NULL, mr, iov, iov_count, &total_len, - ep, &ret)) - return ret; - break; - default: - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "unidentified operation type\n"); - err = -FI_EINVAL; - } - - if (err) { + assert(cmd->hdr.proto < smr_proto_max); + ret = smr_progress_ops[cmd->hdr.proto](ep, cmd, NULL, mr, iov, + iov_count); + if (ret) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "error processing rma op\n"); ret = smr_write_err_comp(ep->util_ep.rx_cq, NULL, smr_rx_cq_flags(0, cmd->hdr.op_flags), - 0, -err); + 0, ret); } else { - ret = smr_complete_rx(ep, (void *) cmd->hdr.tx_ctx, - cmd->hdr.op, + ret = smr_complete_rx(ep, NULL, cmd->hdr.op, smr_rx_cq_flags(0, cmd->hdr.op_flags), - total_len, + cmd->hdr.size, iov_count ? 
iov[0].iov_base : NULL, cmd->hdr.id, 0, cmd->hdr.cq_data); } diff --git a/prov/shm/src/smr_rma.c b/prov/shm/src/smr_rma.c index 426b997528c..bc31f30a877 100644 --- a/prov/shm/src/smr_rma.c +++ b/prov/shm/src/smr_rma.c @@ -185,9 +185,9 @@ static ssize_t smr_generic_rma( cmd = &ce->cmd; ce->ptr = smr_peer_to_peer(ep, id, (uintptr_t) &ce->cmd); } - ret = smr_proto_ops[proto](ep, peer_smr, id, peer_id, op, 0, data, - op_flags, (struct ofi_mr **)desc, iov, - iov_count, total_len, context, cmd); + ret = smr_send_ops[proto](ep, peer_smr, id, peer_id, op, 0, data, + op_flags, (struct ofi_mr **)desc, iov, + iov_count, total_len, context, cmd); if (ret) { if (proto != smr_proto_inline) smr_freestack_push(smr_cmd_stack(ep->region), cmd); @@ -389,8 +389,8 @@ static ssize_t smr_generic_rma_inject(struct fid_ep *ep_fid, const void *buf, ce->ptr = smr_local_to_peer(ep, id, peer_id, (uintptr_t) cmd); } - ret = smr_proto_ops[proto](ep, peer_smr, id, peer_id, ofi_op_write, 0, - data, flags, NULL, &iov, 1, len, NULL, cmd); + ret = smr_send_ops[proto](ep, peer_smr, id, peer_id, ofi_op_write, 0, + data, flags, NULL, &iov, 1, len, NULL, cmd); if (ret) { smr_cmd_queue_discard(ce, pos); goto unlock; From a625db0012ff6769b400c3eee49e9d278ba201a7 Mon Sep 17 00:00:00 2001 From: Alexia Ingerson Date: Tue, 31 Dec 2024 15:40:47 -0800 Subject: [PATCH 10/13] prov/shm: merge tx and pend entries for simple management of pending operations Signed-off-by: Alexia Ingerson --- prov/shm/src/smr.h | 58 ++++++++--------- prov/shm/src/smr_atomic.c | 8 +-- prov/shm/src/smr_ep.c | 61 +++++++++--------- prov/shm/src/smr_progress.c | 124 ++++++++++++++++++------------------ 4 files changed, 121 insertions(+), 130 deletions(-) diff --git a/prov/shm/src/smr.h b/prov/shm/src/smr.h index 61a044b3840..4f926800b5f 100644 --- a/prov/shm/src/smr.h +++ b/prov/shm/src/smr.h @@ -319,9 +319,8 @@ struct smr_ep { struct fid_peer_srx *srx; struct ofi_bufpool *cmd_ctx_pool; struct ofi_bufpool *unexp_buf_pool; - struct ofi_bufpool *pend_buf_pool; + struct ofi_bufpool *pend_pool; - struct smr_tx_fs *tx_fs; struct slist overflow_list; struct dlist_entry ipc_cpy_pend_list; size_t min_multi_recv_size; @@ -482,42 +481,37 @@ int smr_query_atomic(struct fid_domain *domain, enum fi_datatype datatype, enum fi_op op, struct fi_atomic_attr *attr, uint64_t flags); -struct smr_tx_entry { - int64_t peer_id; - void *context; - struct iovec iov[SMR_IOV_LIMIT]; - uint32_t iov_count; - uint64_t op_flags; - size_t bytes_done; - void *map_ptr; - struct smr_ep_name *map_name; - struct ofi_mr *mr[SMR_IOV_LIMIT]; -}; - struct smr_pend_entry { - struct dlist_entry entry; - struct smr_cmd *cmd; - struct fi_peer_rx_entry *rx_entry; - struct smr_cmd_ctx *cmd_ctx; - size_t bytes_done; + union { + struct { + int64_t peer_id; + void *context; + uint64_t op_flags; + } tx; + struct { + struct dlist_entry entry; + struct smr_cmd *cmd; + struct fi_peer_rx_entry *rx_entry; + struct ofi_mr_entry *ipc_entry; + ofi_hmem_async_event_t async_event; + } rx; + }; struct iovec iov[SMR_IOV_LIMIT]; size_t iov_count; struct ofi_mr *mr[SMR_IOV_LIMIT]; - struct ofi_mr_entry *ipc_entry; - ofi_hmem_async_event_t async_event; + size_t bytes_done; }; struct smr_cmd_ctx { - struct dlist_entry entry; - struct smr_ep *ep; - struct smr_cmd *cmd; - struct smr_cmd cmd_cpy; - char msg[SMR_MSG_DATA_LEN]; - struct slist buf_list; + struct dlist_entry entry; + struct smr_ep *ep; + struct smr_pend_entry *pend; + struct smr_cmd *cmd; + struct smr_cmd cmd_cpy; + char msg[SMR_MSG_DATA_LEN]; + struct slist buf_list; 
}; -OFI_DECLARE_FREESTACK(struct smr_tx_entry, smr_tx_fs); - struct smr_domain { struct util_domain util_domain; int fast_rma; @@ -571,9 +565,9 @@ int smr_cntr_open(struct fid_domain *domain, struct fi_cntr_attr *attr, int64_t smr_verify_peer(struct smr_ep *ep, fi_addr_t fi_addr); -void smr_format_pend(struct smr_tx_entry *pend, void *context, - struct ofi_mr **mr, const struct iovec *iov, - uint32_t iov_count, uint64_t op_flags, int64_t id); +void smr_format_tx_pend(struct smr_pend_entry *pend, void *context, + struct ofi_mr **mr, const struct iovec *iov, + uint32_t iov_count, uint64_t op_flags, int64_t id); void smr_generic_format(struct smr_cmd *cmd, int64_t peer_id, uint32_t op, uint64_t tag, uint64_t data, uint64_t op_flags); size_t smr_copy_to_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, diff --git a/prov/shm/src/smr_atomic.c b/prov/shm/src/smr_atomic.c index 24c1f5dd090..e4b53166bda 100644 --- a/prov/shm/src/smr_atomic.c +++ b/prov/shm/src/smr_atomic.c @@ -122,7 +122,7 @@ static ssize_t smr_do_atomic_inject( uint16_t smr_flags, struct smr_cmd *cmd) { struct smr_inject_buf *tx_buf; - struct smr_tx_entry *pend; + struct smr_pend_entry *pend; tx_buf = smr_freestack_pop(smr_inject_pool(ep->region)); assert(tx_buf); @@ -135,11 +135,11 @@ static ssize_t smr_do_atomic_inject( if (op == ofi_op_atomic_fetch || op == ofi_op_atomic_compare || atomic_op == FI_ATOMIC_READ || op_flags & FI_DELIVERY_COMPLETE) { - pend = ofi_freestack_pop(ep->tx_fs); + pend = ofi_buf_alloc(ep->pend_pool); assert(pend); cmd->hdr.tx_ctx = (uintptr_t) pend; - smr_format_pend(pend, context, res_desc, resultv, - result_count, op_flags, id); + smr_format_tx_pend(pend, context, res_desc, resultv, + result_count, op_flags, id); } else { cmd->hdr.tx_ctx = 0; } diff --git a/prov/shm/src/smr_ep.c b/prov/shm/src/smr_ep.c index 6c473fa3991..0903b6d6be0 100644 --- a/prov/shm/src/smr_ep.c +++ b/prov/shm/src/smr_ep.c @@ -214,15 +214,16 @@ int64_t smr_verify_peer(struct smr_ep *ep, fi_addr_t fi_addr) return -1; } -void smr_format_pend(struct smr_tx_entry *pend, void *context, - struct ofi_mr **mr, const struct iovec *iov, - uint32_t iov_count, uint64_t op_flags, int64_t id) +void smr_format_tx_pend(struct smr_pend_entry *pend, void *context, + struct ofi_mr **mr, const struct iovec *iov, + uint32_t iov_count, uint64_t op_flags, int64_t id) { - pend->context = context; + pend->tx.context = context; + pend->tx.peer_id = id; + pend->tx.op_flags = op_flags; + memcpy(pend->iov, iov, sizeof(*iov) * iov_count); pend->iov_count = iov_count; - pend->peer_id = id; - pend->op_flags = op_flags; pend->bytes_done = 0; if (mr) @@ -366,7 +367,7 @@ static int smr_format_sar(struct smr_ep *ep, struct smr_cmd *cmd, struct ofi_mr **mr, const struct iovec *iov, size_t count, size_t total_len, struct smr_region *smr, struct smr_region *peer_smr, - int64_t id, struct smr_tx_entry *pending) + int64_t id, struct smr_pend_entry *pending) { int i, ret; @@ -496,18 +497,18 @@ static ssize_t smr_do_inject(struct smr_ep *ep, struct smr_region *peer_smr, struct smr_cmd *cmd) { struct smr_inject_buf *tx_buf; - struct smr_tx_entry *pend; + struct smr_pend_entry *pend; tx_buf = smr_freestack_pop(smr_inject_pool(ep->region)); assert(tx_buf); if (op == ofi_op_read_req) { - pend = ofi_freestack_pop(ep->tx_fs); + pend = ofi_buf_alloc(ep->pend_pool); assert(pend); cmd->hdr.tx_ctx = (uintptr_t) pend; - smr_format_pend(pend, context, desc, iov, iov_count, op_flags, - id); + smr_format_tx_pend(pend, context, desc, iov, iov_count, + op_flags, id); } else { 
cmd->hdr.tx_ctx = 0; } @@ -525,16 +526,16 @@ static ssize_t smr_do_iov(struct smr_ep *ep, struct smr_region *peer_smr, size_t iov_count, size_t total_len, void *context, struct smr_cmd *cmd) { - struct smr_tx_entry *pend; + struct smr_pend_entry *pend; - pend = ofi_freestack_pop(ep->tx_fs); + pend = ofi_buf_alloc(ep->pend_pool); assert(pend); cmd->hdr.tx_ctx = (uintptr_t) pend; smr_generic_format(cmd, peer_id, op, tag, data, op_flags); smr_format_iov(cmd, iov, iov_count, total_len, ep->region); - smr_format_pend(pend, context, desc, iov, iov_count, op_flags, id); + smr_format_tx_pend(pend, context, desc, iov, iov_count, op_flags, id); return FI_SUCCESS; } @@ -546,24 +547,23 @@ static ssize_t smr_do_sar(struct smr_ep *ep, struct smr_region *peer_smr, size_t iov_count, size_t total_len, void *context, struct smr_cmd *cmd) { - struct smr_tx_entry *pend; + struct smr_pend_entry *pend; int ret; - pend = ofi_freestack_pop(ep->tx_fs); + pend = ofi_buf_alloc(ep->pend_pool); assert(pend); cmd->hdr.tx_ctx = (uintptr_t) pend; - smr_format_pend(pend, context, desc, iov, iov_count, op_flags, id); + smr_format_tx_pend(pend, context, desc, iov, iov_count, op_flags, id); smr_generic_format(cmd, peer_id, op, tag, data, op_flags); ret = smr_format_sar(ep, cmd, desc, iov, iov_count, total_len, ep->region, peer_smr, id, pend); if (ret) { - ofi_freestack_push(ep->tx_fs, pend); + ofi_buf_free(pend); return ret; } - return FI_SUCCESS; } @@ -574,10 +574,10 @@ static ssize_t smr_do_ipc(struct smr_ep *ep, struct smr_region *peer_smr, size_t iov_count, size_t total_len, void *context, struct smr_cmd *cmd) { - struct smr_tx_entry *pend; + struct smr_pend_entry *pend; int ret = -FI_EAGAIN; - pend = ofi_freestack_pop(ep->tx_fs); + pend = ofi_buf_alloc(ep->pend_pool); assert(pend); cmd->hdr.tx_ctx = (uintptr_t) pend; @@ -590,13 +590,13 @@ static ssize_t smr_do_ipc(struct smr_ep *ep, struct smr_region *peer_smr, FI_WARN_ONCE(&smr_prov, FI_LOG_EP_CTRL, "unable to use IPC for msg, " "fallback to using SAR\n"); - ofi_freestack_push(ep->tx_fs, pend); + ofi_buf_free(pend); return smr_do_sar(ep, peer_smr, id, peer_id, op, tag, data, op_flags, desc, iov, iov_count, total_len, context, cmd); } - smr_format_pend(pend, context, desc, iov, iov_count, op_flags, id); + smr_format_tx_pend(pend, context, desc, iov, iov_count, op_flags, id); return FI_SUCCESS; } @@ -635,10 +635,8 @@ static int smr_ep_close(struct fid *fid) if (ep->unexp_buf_pool) ofi_bufpool_destroy(ep->unexp_buf_pool); - if (ep->pend_buf_pool) - ofi_bufpool_destroy(ep->pend_buf_pool); - - smr_tx_fs_free(ep->tx_fs); + if (ep->pend_pool) + ofi_bufpool_destroy(ep->pend_pool); free((void *)ep->name); free(ep); @@ -930,13 +928,14 @@ static int smr_create_pools(struct smr_ep *ep, struct fi_info *info) if (ret) goto free2; - ret = ofi_bufpool_create(&ep->pend_buf_pool, + ret = ofi_bufpool_create(&ep->pend_pool, sizeof(struct smr_pend_entry), - 16, 0, 4, OFI_BUFPOOL_NO_TRACK); + 16, 0, ep->tx_size, OFI_BUFPOOL_NO_TRACK); if (ret) goto free1; return FI_SUCCESS; + free1: ofi_bufpool_destroy(ep->unexp_buf_pool); free2: @@ -948,7 +947,7 @@ static int smr_create_pools(struct smr_ep *ep, struct fi_info *info) } int smr_endpoint(struct fid_domain *domain, struct fi_info *info, - struct fid_ep **ep_fid, void *context) + struct fid_ep **ep_fid, void *context) { struct smr_ep *ep; int ret; @@ -981,8 +980,6 @@ int smr_endpoint(struct fid_domain *domain, struct fi_info *info, if (ret) goto ep; - ep->tx_fs = smr_tx_fs_create(info->tx_attr->size, NULL, NULL); - 
dlist_init(&ep->ipc_cpy_pend_list); slist_init(&ep->overflow_list); diff --git a/prov/shm/src/smr_progress.c b/prov/shm/src/smr_progress.c index 63ddaaa5d32..7b28e8f40c0 100644 --- a/prov/shm/src/smr_progress.c +++ b/prov/shm/src/smr_progress.c @@ -35,7 +35,7 @@ static void smr_progress_overflow(struct smr_ep *ep) { struct smr_cmd_entry *ce; - struct smr_tx_entry *pending; + struct smr_pend_entry *pending; struct smr_region *peer_smr; struct smr_cmd *cmd; int64_t pos; @@ -45,14 +45,14 @@ static void smr_progress_overflow(struct smr_ep *ep) entry = ep->overflow_list.head; while (entry) { cmd = (struct smr_cmd *) entry; - pending = (struct smr_tx_entry *) cmd->hdr.tx_ctx; - peer_smr = smr_peer_region(ep, pending->peer_id); + pending = (struct smr_pend_entry *) cmd->hdr.tx_ctx; + peer_smr = smr_peer_region(ep, pending->tx.peer_id); ret = smr_cmd_queue_next(smr_cmd_queue(peer_smr), &ce, &pos); if (ret == -FI_ENOENT) return; - ce->ptr = smr_local_to_peer(ep, pending->peer_id, cmd->hdr.id, - (uintptr_t) cmd); + ce->ptr = smr_local_to_peer(ep, pending->tx.peer_id, + cmd->hdr.id, (uintptr_t) cmd); slist_remove_head(&ep->overflow_list); smr_cmd_queue_commit(ce, pos); @@ -70,7 +70,7 @@ static void smr_try_send_cmd(struct smr_ep *ep, struct smr_cmd *cmd) } static inline void smr_free_sar_bufs(struct smr_ep *ep, struct smr_cmd *cmd, - struct smr_tx_entry *pending) + struct smr_pend_entry *pending) { int i; @@ -78,11 +78,11 @@ static inline void smr_free_sar_bufs(struct smr_ep *ep, struct smr_cmd *cmd, smr_freestack_push_by_index(smr_sar_pool(ep->region), cmd->data.sar[i]); } - smr_peer_data(ep->region)[pending->peer_id].sar_status = 0; + smr_peer_data(ep->region)[pending->tx.peer_id].sar_status = 0; } static int smr_progress_return_entry(struct smr_ep *ep, struct smr_cmd *cmd, - struct smr_tx_entry *pending) + struct smr_pend_entry *pending) { struct smr_inject_buf *tx_buf = NULL; uint8_t *src; @@ -169,7 +169,7 @@ static void smr_progress_return(struct smr_ep *ep) { struct smr_return_entry *queue_entry; struct smr_cmd *cmd; - struct smr_tx_entry *pending; + struct smr_pend_entry *pending; int64_t pos; int ret; @@ -181,7 +181,7 @@ static void smr_progress_return(struct smr_ep *ep) break; cmd = (struct smr_cmd *) queue_entry->ptr; - pending = (struct smr_tx_entry *) cmd->hdr.tx_ctx; + pending = (struct smr_pend_entry *) cmd->hdr.tx_ctx; ret = smr_progress_return_entry(ep, cmd, pending); if (ret != -FI_EAGAIN) { @@ -189,22 +189,22 @@ static void smr_progress_return(struct smr_ep *ep) if (cmd->hdr.status) { ret = smr_write_err_comp( ep->util_ep.tx_cq, - pending->context, - pending->op_flags, + pending->tx.context, + pending->tx.op_flags, cmd->hdr.tag, cmd->hdr.status); } else { ret = smr_complete_tx( - ep, pending->context, + ep, pending->tx.context, cmd->hdr.op, - pending->op_flags); + pending->tx.op_flags); } if (ret) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "unable to process " "tx completion\n"); } - ofi_freestack_push(ep->tx_fs, pending); + ofi_buf_free(pending); } smr_freestack_push(smr_cmd_stack(ep->region), cmd); } @@ -298,6 +298,7 @@ static void smr_buffer_sar(struct smr_ep *ep, struct smr_pend_entry *sar_entry, struct smr_region *peer_smr; struct smr_sar_buf *sar_buf; struct smr_unexp_buf *buf; + struct smr_cmd_ctx *cmd_ctx = sar_entry->rx.rx_entry->peer_context; size_t bytes; int next_buf = 0; @@ -312,7 +313,7 @@ static void smr_buffer_sar(struct smr_ep *ep, struct smr_pend_entry *sar_entry, "(-FI_ENOMEM)\n"); return; } - slist_insert_tail(&buf->entry, &sar_entry->cmd_ctx->buf_list); + 
slist_insert_tail(&buf->entry, &cmd_ctx->buf_list); sar_buf = smr_freestack_get_entry_from_index( smr_sar_pool(peer_smr), @@ -336,7 +337,7 @@ static int smr_progress_pending_sar(struct smr_ep *ep, struct smr_cmd *cmd) int ret; sar_entry = (struct smr_pend_entry *) cmd->hdr.rx_ctx; - if (sar_entry->cmd_ctx) { + if (sar_entry->rx.rx_entry && sar_entry->rx.rx_entry->peer_context) { smr_buffer_sar(ep, sar_entry, cmd); goto out; } @@ -357,10 +358,10 @@ static int smr_progress_pending_sar(struct smr_ep *ep, struct smr_cmd *cmd) cmd->hdr.status = ret; if (sar_entry->bytes_done == cmd->hdr.size || ret) { - if (sar_entry->rx_entry) { - comp_ctx = sar_entry->rx_entry->context; + if (sar_entry->rx.rx_entry) { + comp_ctx = sar_entry->rx.rx_entry->context; comp_flags = smr_rx_cq_flags( - sar_entry->rx_entry->flags, + sar_entry->rx.rx_entry->flags, cmd->hdr.op_flags); } else { comp_ctx = NULL; @@ -382,8 +383,8 @@ static int smr_progress_pending_sar(struct smr_ep *ep, struct smr_cmd *cmd) FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "unable to process rx completion\n"); } - if (sar_entry->rx_entry) - ep->srx->owner_ops->free_entry(sar_entry->rx_entry); + if (sar_entry->rx.rx_entry) + ep->srx->owner_ops->free_entry(sar_entry->rx.rx_entry); ofi_buf_free(sar_entry); } @@ -433,14 +434,13 @@ ssize_t smr_progress_sar(struct smr_ep *ep, struct smr_cmd *cmd, if (bytes_done == cmd->hdr.size) goto out; - sar_entry = ofi_buf_alloc(ep->pend_buf_pool); + sar_entry = ofi_buf_alloc(ep->pend_pool); cmd->hdr.rx_ctx = (uintptr_t) sar_entry; sar_entry->bytes_done = bytes_done; - sar_entry->cmd_ctx = NULL; memcpy(sar_entry->iov, sar_iov, sizeof(*sar_iov) * iov_count); sar_entry->iov_count = iov_count; - sar_entry->rx_entry = rx_entry ? rx_entry : NULL; + sar_entry->rx.rx_entry = rx_entry; if (mr) memcpy(sar_entry->mr, mr, sizeof(*mr) * iov_count); else @@ -460,20 +460,20 @@ static int smr_ipc_async_copy(struct smr_ep *ep, struct smr_cmd *cmd, uint64_t device = cmd->data.ipc_info.device; int ret; - ipc_entry = ofi_buf_alloc(ep->pend_buf_pool); + ipc_entry = ofi_buf_alloc(ep->pend_pool); if (!ipc_entry) return -FI_ENOMEM; cmd->hdr.rx_ctx = (uintptr_t) ipc_entry; - ipc_entry->ipc_entry = mr_entry; + ipc_entry->rx.ipc_entry = mr_entry; ipc_entry->bytes_done = 0; - ipc_entry->cmd = cmd; + ipc_entry->rx.cmd = cmd; memcpy(ipc_entry->iov, iov, sizeof(*iov) * iov_count); ipc_entry->iov_count = iov_count; - ipc_entry->rx_entry = rx_entry; + ipc_entry->rx.rx_entry = rx_entry; ret = ofi_create_async_copy_event(iface, device, - &ipc_entry->async_event); + &ipc_entry->rx.async_event); if (ret < 0) goto fail; @@ -481,16 +481,16 @@ static int smr_ipc_async_copy(struct smr_ep *ep, struct smr_cmd *cmd, ret = ofi_async_copy_from_hmem_iov(ptr, cmd->hdr.size, iface, device, iov, iov_count, 0, - ipc_entry->async_event); + ipc_entry->rx.async_event); } else { ret = ofi_async_copy_to_hmem_iov(iface, device, iov, iov_count, 0, ptr, cmd->hdr.size, - ipc_entry->async_event); + ipc_entry->rx.async_event); } if (ret < 0) goto fail; - dlist_insert_tail(&ipc_entry->entry, &ep->ipc_cpy_pend_list); + dlist_insert_tail(&ipc_entry->rx.entry, &ep->ipc_cpy_pend_list); return FI_SUCCESS; fail: @@ -675,12 +675,13 @@ static int smr_progress_inject_atomic(struct smr_cmd *cmd, struct ofi_mr **mr, } static int smr_start_common(struct smr_ep *ep, struct smr_cmd *cmd, - struct fi_peer_rx_entry *rx_entry) + struct fi_peer_rx_entry *rx_entry) { uint64_t comp_flags; void *comp_buf; int ret; + rx_entry->peer_context = NULL; assert (cmd->hdr.proto < smr_proto_max); ret = 
smr_progress_ops[cmd->hdr.proto]( ep, cmd, rx_entry, @@ -724,7 +725,7 @@ static int smr_copy_saved(struct smr_cmd_ctx *cmd_ctx, uint64_t comp_flags; int ret; - sar_entry = (struct smr_pend_entry *) cmd_ctx->cmd->hdr.rx_ctx; + sar_entry = (struct smr_pend_entry *) cmd_ctx->pend; while (!slist_empty(&cmd_ctx->buf_list)) { slist_remove_head_container(&cmd_ctx->buf_list, struct smr_unexp_buf, sar_buf, @@ -738,8 +739,8 @@ static int smr_copy_saved(struct smr_cmd_ctx *cmd_ctx, ofi_buf_free(sar_buf); } if (bytes != cmd_ctx->cmd->hdr.size) { - sar_entry->cmd_ctx = NULL; - sar_entry->rx_entry = rx_entry; + sar_entry->rx.rx_entry = rx_entry; + rx_entry->peer_context = NULL; memcpy(sar_entry->iov, rx_entry->iov, sizeof(*rx_entry->iov) * rx_entry->count); sar_entry->iov_count = rx_entry->count; @@ -877,7 +878,7 @@ static int smr_alloc_cmd_ctx(struct smr_ep *ep, cmd_ctx->cmd = &cmd_ctx->cmd_cpy; if (cmd->hdr.size) { - sar_entry = ofi_buf_alloc(ep->pend_buf_pool); + sar_entry = ofi_buf_alloc(ep->pend_pool); if (!sar_entry) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "Error allocating sar entry\n"); @@ -885,12 +886,10 @@ static int smr_alloc_cmd_ctx(struct smr_ep *ep, return -FI_ENOMEM; } cmd->hdr.rx_ctx = (uintptr_t) sar_entry; - memcpy(&cmd_ctx->cmd_cpy, cmd, sizeof(cmd->hdr)); slist_init(&cmd_ctx->buf_list); - sar_entry->cmd_ctx = cmd_ctx; sar_entry->bytes_done = 0; - sar_entry->rx_entry = rx_entry; + sar_entry->rx.rx_entry = rx_entry; smr_buffer_sar(ep, sar_entry, cmd); } @@ -1176,29 +1175,30 @@ void smr_progress_ipc_list(struct smr_ep *ep) /* after the synchronize all operations should be complete */ dlist_foreach_container_safe(&ep->ipc_cpy_pend_list, struct smr_pend_entry, - ipc_entry, entry, tmp) { - iface = ipc_entry->cmd->data.ipc_info.iface; - device = ipc_entry->cmd->data.ipc_info.device; + ipc_entry, rx.entry, tmp) { + iface = ipc_entry->rx.cmd->data.ipc_info.iface; + device = ipc_entry->rx.cmd->data.ipc_info.device; - if (ofi_async_copy_query(iface, ipc_entry->async_event)) + if (ofi_async_copy_query(iface, ipc_entry->rx.async_event)) continue; - if (ipc_entry->rx_entry) { - context = ipc_entry->rx_entry->context; - flags = smr_rx_cq_flags(ipc_entry->rx_entry->flags, - ipc_entry->cmd->hdr.op_flags); + if (ipc_entry->rx.rx_entry) { + context = ipc_entry->rx.rx_entry->context; + flags = smr_rx_cq_flags( + ipc_entry->rx.rx_entry->flags, + ipc_entry->rx.cmd->hdr.op_flags); } else { context = NULL; - flags = smr_rx_cq_flags(0, - ipc_entry->cmd->hdr.op_flags); + flags = smr_rx_cq_flags( + 0, ipc_entry->rx.cmd->hdr.op_flags); } - ret = smr_complete_rx(ep, context, ipc_entry->cmd->hdr.op, - flags, ipc_entry->cmd->hdr.size, + ret = smr_complete_rx(ep, context, ipc_entry->rx.cmd->hdr.op, + flags, ipc_entry->rx.cmd->hdr.size, ipc_entry->iov[0].iov_base, - ipc_entry->cmd->hdr.id, - ipc_entry->cmd->hdr.tag, - ipc_entry->cmd->hdr.cq_data); + ipc_entry->rx.cmd->hdr.id, + ipc_entry->rx.cmd->hdr.tag, + ipc_entry->rx.cmd->hdr.cq_data); if (ret) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "unable to process rx completion\n"); @@ -1210,15 +1210,15 @@ void smr_progress_ipc_list(struct smr_ep *ep) * buffer is now free to be reused */ - ofi_mr_cache_delete(domain->ipc_cache, ipc_entry->ipc_entry); + ofi_mr_cache_delete(domain->ipc_cache, ipc_entry->rx.ipc_entry); ofi_free_async_copy_event(iface, device, - ipc_entry->async_event); - dlist_remove(&ipc_entry->entry); - if (ipc_entry->rx_entry) - ep->srx->owner_ops->free_entry(ipc_entry->rx_entry); + ipc_entry->rx.async_event); + dlist_remove(&ipc_entry->rx.entry); + if 
(ipc_entry->rx.rx_entry) + ep->srx->owner_ops->free_entry(ipc_entry->rx.rx_entry); ofi_buf_free(ipc_entry); - smr_return_cmd(ep, ipc_entry->cmd); + smr_return_cmd(ep, ipc_entry->rx.cmd); } } From 4f226d6f395e7633f68c3ca1ced7788a348d8318 Mon Sep 17 00:00:00 2001 From: Alexia Ingerson Date: Fri, 3 Jan 2025 13:11:00 -0800 Subject: [PATCH 11/13] prov/shm: remove proto data proto data isn't really needed: it's only used for the inject offset now, and the cmd stack and inject buffers run in parallel. Use a simple inject buf array and access it by index, where the index is the same as the command's index in its stack. Signed-off-by: Alexia Ingerson --- include/ofi_mem.h | 8 ++++++++ prov/shm/src/smr.h | 12 ++++++++---- prov/shm/src/smr_atomic.c | 34 ++++++++++++++++------------------ prov/shm/src/smr_ep.c | 15 +++++++-------- prov/shm/src/smr_progress.c | 10 ++++------ prov/shm/src/smr_util.c | 7 ++----- 6 files changed, 45 insertions(+), 41 deletions(-) diff --git a/include/ofi_mem.h b/include/ofi_mem.h index a0f42092392..d48747c10a7 100644 --- a/include/ofi_mem.h +++ b/include/ofi_mem.h @@ -259,6 +259,14 @@ static inline void smr_freestack_push_by_offset(struct smr_freestack *fs, fs->object_size); } +/* Get entry index in fs */ +static inline int16_t smr_freestack_get_index(struct smr_freestack *fs, + char *local_p) +{ + return (int16_t) ((local_p - (char *) fs - fs->entry_base_offset) / + fs->object_size); +} + /* Push by object */ static inline void smr_freestack_push(struct smr_freestack *fs, void *local_p) { diff --git a/prov/shm/src/smr.h b/prov/shm/src/smr.h index 4f926800b5f..0783e3e58ee 100644 --- a/prov/shm/src/smr.h +++ b/prov/shm/src/smr.h @@ -123,7 +123,6 @@ enum { * proto - msg src (ex. smr_src_inline, defined above) * op_flags - operation flags (ex.
SMR_REMOTE_CQ_DATA, defined above) * size - size of data transfer - * proto_data - src of additional protocol data (inject offset) * status - returned status of operation * cq_data - remote CQ data */ @@ -137,7 +136,6 @@ struct smr_cmd_hdr { uint16_t op_flags; uint64_t size; - uint64_t proto_data; int64_t status; uint64_t cq_data; union { @@ -349,9 +347,9 @@ static inline struct smr_freestack *smr_cmd_stack(struct smr_region *smr) { return (struct smr_freestack *) ((char *) smr + smr->cmd_stack_offset); } -static inline struct smr_freestack *smr_inject_pool(struct smr_region *smr) +static inline struct smr_inject_buf *smr_inject_pool(struct smr_region *smr) { - return (struct smr_freestack *) ((char *) smr + smr->inject_pool_offset); + return (struct smr_inject_buf *) ((char *) smr + smr->inject_pool_offset); } static inline struct smr_return_queue *smr_return_queue(struct smr_region *smr) { @@ -365,6 +363,12 @@ static inline struct smr_freestack *smr_sar_pool(struct smr_region *smr) { return (struct smr_freestack *) ((char *) smr + smr->sar_pool_offset); } +static inline struct smr_inject_buf *smr_get_inject_buf(struct smr_region *smr, + struct smr_cmd *cmd) +{ + return &smr_inject_pool(smr)[smr_freestack_get_index(smr_cmd_stack(smr), + (char *) cmd)]; +} struct smr_attr { const char *name; diff --git a/prov/shm/src/smr_atomic.c b/prov/shm/src/smr_atomic.c index e4b53166bda..0b2bc9fa696 100644 --- a/prov/shm/src/smr_atomic.c +++ b/prov/shm/src/smr_atomic.c @@ -67,38 +67,40 @@ static void smr_do_atomic_inline(struct smr_ep *ep, struct smr_region *peer_smr, smr_format_inline_atomic(cmd, desc, iov, iov_count); } -static void smr_format_inject_atomic(struct smr_cmd *cmd, struct ofi_mr **desc, +static void smr_format_inject_atomic( + struct smr_cmd *cmd, struct ofi_mr **desc, const struct iovec *iov, size_t count, const struct iovec *resultv, size_t result_count, struct ofi_mr **comp_desc, const struct iovec *compv, - size_t comp_count, struct smr_region *smr, - struct smr_inject_buf *tx_buf) + size_t comp_count, struct smr_region *smr) { + struct smr_inject_buf *tx_buf; size_t comp_size; cmd->hdr.proto = smr_proto_inject; - cmd->hdr.proto_data = smr_get_offset(smr, tx_buf); + tx_buf = smr_get_inject_buf(smr, cmd); switch (cmd->hdr.op) { case ofi_op_atomic: - cmd->hdr.size = ofi_copy_from_mr_iov(tx_buf->data, - SMR_INJECT_SIZE, desc, iov, count, 0); + cmd->hdr.size = ofi_copy_from_mr_iov( + tx_buf->data, SMR_INJECT_SIZE, desc, + iov, count, 0); break; case ofi_op_atomic_fetch: if (cmd->hdr.atomic_op == FI_ATOMIC_READ) cmd->hdr.size = ofi_total_iov_len(resultv, result_count); else - cmd->hdr.size = ofi_copy_from_mr_iov(tx_buf->data, - SMR_INJECT_SIZE, desc, iov, - count, 0); + cmd->hdr.size = ofi_copy_from_mr_iov( + tx_buf->data, SMR_INJECT_SIZE, + desc, iov, count, 0); break; case ofi_op_atomic_compare: cmd->hdr.size = ofi_copy_from_mr_iov(tx_buf->buf, - SMR_COMP_INJECT_SIZE, - desc, iov, count, 0); - comp_size = ofi_copy_from_mr_iov(tx_buf->comp, - SMR_COMP_INJECT_SIZE, + SMR_COMP_INJECT_SIZE, + desc, iov, count, 0); + comp_size = ofi_copy_from_mr_iov( + tx_buf->comp, SMR_COMP_INJECT_SIZE, comp_desc, compv, comp_count, 0); if (comp_size != cmd->hdr.size) FI_WARN(&smr_prov, FI_LOG_EP_CTRL, @@ -121,17 +123,13 @@ static ssize_t smr_do_atomic_inject( size_t comp_count, size_t total_len, void *context, uint16_t smr_flags, struct smr_cmd *cmd) { - struct smr_inject_buf *tx_buf; struct smr_pend_entry *pend; - tx_buf = smr_freestack_pop(smr_inject_pool(ep->region)); - assert(tx_buf); - 
smr_generic_format(cmd, peer_id, op, 0, 0, op_flags); smr_generic_atomic_format(cmd, datatype, atomic_op); smr_format_inject_atomic(cmd, desc, iov, iov_count, resultv, result_count, comp_desc, compv, comp_count, - ep->region, tx_buf); + ep->region); if (op == ofi_op_atomic_fetch || op == ofi_op_atomic_compare || atomic_op == FI_ATOMIC_READ || op_flags & FI_DELIVERY_COMPLETE) { diff --git a/prov/shm/src/smr_ep.c b/prov/shm/src/smr_ep.c index 0903b6d6be0..1a2e9325316 100644 --- a/prov/shm/src/smr_ep.c +++ b/prov/shm/src/smr_ep.c @@ -257,11 +257,14 @@ static void smr_format_inline(struct smr_cmd *cmd, struct ofi_mr **mr, } static void smr_format_inject(struct smr_cmd *cmd, struct ofi_mr **mr, - const struct iovec *iov, size_t count, struct smr_region *smr, - struct smr_inject_buf *tx_buf) + const struct iovec *iov, size_t count, + struct smr_region *smr) { + struct smr_inject_buf *tx_buf; + + tx_buf = smr_get_inject_buf(smr, cmd); + cmd->hdr.proto = smr_proto_inject; - cmd->hdr.proto_data = smr_get_offset(smr, tx_buf); if (cmd->hdr.op != ofi_op_read_req) cmd->hdr.size = ofi_copy_from_mr_iov(tx_buf->data, SMR_INJECT_SIZE, @@ -496,12 +499,8 @@ static ssize_t smr_do_inject(struct smr_ep *ep, struct smr_region *peer_smr, size_t iov_count, size_t total_len, void *context, struct smr_cmd *cmd) { - struct smr_inject_buf *tx_buf; struct smr_pend_entry *pend; - tx_buf = smr_freestack_pop(smr_inject_pool(ep->region)); - assert(tx_buf); - if (op == ofi_op_read_req) { pend = ofi_buf_alloc(ep->pend_pool); assert(pend); @@ -514,7 +513,7 @@ static ssize_t smr_do_inject(struct smr_ep *ep, struct smr_region *peer_smr, } smr_generic_format(cmd, peer_id, op, tag, data, op_flags); - smr_format_inject(cmd, desc, iov, iov_count, ep->region, tx_buf); + smr_format_inject(cmd, desc, iov, iov_count, ep->region); return FI_SUCCESS; } diff --git a/prov/shm/src/smr_progress.c b/prov/shm/src/smr_progress.c index 7b28e8f40c0..ea2c01b3a73 100644 --- a/prov/shm/src/smr_progress.c +++ b/prov/shm/src/smr_progress.c @@ -126,7 +126,7 @@ static int smr_progress_return_entry(struct smr_ep *ep, struct smr_cmd *cmd, smr_try_send_cmd(ep, cmd); return -FI_EAGAIN; case smr_proto_inject: - tx_buf = smr_get_ptr(ep->region, cmd->hdr.proto_data); + tx_buf = smr_get_inject_buf(ep->region, cmd); if (pending) { if (pending->bytes_done != cmd->hdr.size && cmd->hdr.op != ofi_op_atomic) { @@ -155,7 +155,6 @@ static int smr_progress_return_entry(struct smr_ep *ep, struct smr_cmd *cmd, } } } - smr_freestack_push(smr_inject_pool(ep->region), tx_buf); break; default: FI_WARN(&smr_prov, FI_LOG_EP_CTRL, @@ -245,7 +244,7 @@ ssize_t smr_progress_inject(struct smr_ep *ep, struct smr_cmd *cmd, ssize_t ret; peer_smr = smr_peer_region(ep, cmd->hdr.id); - tx_buf = smr_get_ptr(peer_smr, (size_t) cmd->hdr.proto_data); + tx_buf = smr_get_inject_buf(peer_smr, cmd); if (cmd->hdr.op == ofi_op_read_req) { ret = ofi_copy_from_mr_iov(tx_buf->data, cmd->hdr.size, mr, @@ -636,12 +635,11 @@ static int smr_progress_inject_atomic(struct smr_cmd *cmd, struct ofi_mr **mr, struct smr_ep *ep, int err) { struct smr_inject_buf *tx_buf; - size_t inj_offset; uint8_t *src, *comp; int i; - inj_offset = (size_t) cmd->hdr.proto_data; - tx_buf = smr_get_ptr(ep->region, inj_offset); + tx_buf = smr_get_inject_buf(smr_peer_region(ep, cmd->hdr.id), cmd); + if (err) goto out; diff --git a/prov/shm/src/smr_util.c b/prov/shm/src/smr_util.c index 296eda76633..dec3962759a 100644 --- a/prov/shm/src/smr_util.c +++ b/prov/shm/src/smr_util.c @@ -103,8 +103,7 @@ size_t 
smr_calculate_size_offsets(size_t tx_count, size_t rx_count, sizeof(struct smr_cmd_queue_entry) * rx_size; inject_pool_offset = cmd_stack_offset + freestack_size(sizeof(struct smr_cmd), tx_size); - ret_queue_offset = inject_pool_offset + - freestack_size(sizeof(struct smr_inject_buf), tx_size); + ret_queue_offset = inject_pool_offset + sizeof(struct smr_inject_buf) * tx_size; ret_queue_offset = ofi_get_aligned_size(ret_queue_offset, 64); sar_pool_offset = ret_queue_offset + sizeof(struct smr_return_queue) + sizeof(struct smr_return_queue_entry) * tx_size; @@ -284,10 +283,8 @@ int smr_create(const struct fi_provider *prov, struct smr_map *map, smr_freestack_init(smr_cmd_stack(*smr), tx_size, sizeof(struct smr_cmd_entry)); - smr_freestack_init(smr_inject_pool(*smr), rx_size, - sizeof(struct smr_inject_buf)); smr_freestack_init(smr_sar_pool(*smr), SMR_MAX_PEERS, - sizeof(struct smr_sar_buf)); + sizeof(struct smr_sar_buf)); for (i = 0; i < SMR_MAX_PEERS; i++) { smr_peer_data(*smr)[i].id = -1; smr_peer_data(*smr)[i].sar_status = 0; From 1a7344121646ba8158725e8453906eef8ac630ee Mon Sep 17 00:00:00 2001 From: Alexia Ingerson Date: Tue, 31 Dec 2024 11:48:22 -0800 Subject: [PATCH 12/13] prov/shm: fix dsa implementation DSA copies happen asynchronously so we need a way to notify the receiver when the copy is done and the data is available. This used to be done with the response queue and sar list. The response queue notification can still be done with the return of the command but the sar list was removed since more data is sent by returning the command on a subsequent loop. DSA still needs this list to track asynchronous copies. This refactors the async ipc list and turns it into a generic async list to track asynchronous copies. If a DSA copy is not ready, the entry is inserted into the async list and polled until it is ready to be copied and then it resumes the regular SAR protocol where the command is returned to the sender. Tracking the status of the sar is done through the existing sar status of the peer but to check for the correct status, the rx id is also needed by the receiver for proper status exchange. 
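To make that handshake concrete, here is a small standalone sketch of the polling scheme described above (all names are hypothetical stand-ins, not the provider's actual symbols): the transmitter marks the peer-visible status word busy while the copy engine works asynchronously, each pass of the progress loop polls the pending entry, and only a finished copy flips the status to ready so the peer can resume the SAR protocol.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the peer-visible SAR status word and a
 * pending asynchronous copy; these do not match the provider's types. */
enum sar_status { SAR_FREE, SAR_BUSY, SAR_READY };

struct pending_copy {
	bool copy_done;          /* set by the copy engine (e.g. DSA) */
	enum sar_status *status; /* status word the peer polls */
};

/* One pass of a progress loop: once the asynchronous copy finishes,
 * advertise READY so the peer may consume the SAR buffer. */
static bool progress_pending(struct pending_copy *pend)
{
	if (!pend->copy_done)
		return false; /* still in flight; poll again next pass */
	*pend->status = SAR_READY;
	return true;
}

int main(void)
{
	enum sar_status status = SAR_BUSY;
	struct pending_copy pend = { false, &status };

	progress_pending(&pend); /* copy not done: status stays BUSY */
	pend.copy_done = true;   /* copy engine signals completion */
	progress_pending(&pend); /* now advertises READY to the peer */
	printf("status=%d (2 == READY)\n", (int) status);
	return 0;
}

In the actual provider the status word lives in the peer data and the pending entry carries the DSA work descriptors, but the polling structure is the same.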
This also refactors the ids to make it clearer when an id is used for the transmitter (tx) or target (rx). Signed-off-by: Alexia Ingerson --- include/ofi_hmem.h | 3 + prov/shm/src/smr.h | 90 +++++---- prov/shm/src/smr_atomic.c | 30 +-- prov/shm/src/smr_av.c | 2 +- prov/shm/src/smr_dsa.c | 327 ++++++++++++++------------------ prov/shm/src/smr_ep.c | 154 +++++++-------- prov/shm/src/smr_msg.c | 40 ++-- prov/shm/src/smr_progress.c | 369 +++++++++++++++++++----------------- prov/shm/src/smr_rma.c | 34 ++-- prov/shm/src/smr_util.c | 2 +- src/hmem.c | 6 +- 11 files changed, 521 insertions(+), 536 deletions(-) diff --git a/include/ofi_hmem.h b/include/ofi_hmem.h index 9db6d94cd70..531b673fb89 100644 --- a/include/ofi_hmem.h +++ b/include/ofi_hmem.h @@ -421,6 +421,9 @@ ssize_t ofi_copy_from_mr_iov(void *dest, size_t size, struct ofi_mr **mr, ssize_t ofi_copy_to_mr_iov(struct ofi_mr **mr, const struct iovec *iov, size_t iov_count, uint64_t iov_offset, const void *src, size_t size); +ssize_t ofi_copy_mr_iov(struct ofi_mr **mr, const struct iovec *iov, + size_t iov_count, size_t offset, void *buf, + size_t size, int dir); int ofi_hmem_get_handle(enum fi_hmem_iface iface, void *base_addr, size_t size, void **handle); diff --git a/prov/shm/src/smr.h b/prov/shm/src/smr.h index 0783e3e58ee..abd559e7753 100644 --- a/prov/shm/src/smr.h +++ b/prov/shm/src/smr.h @@ -130,7 +130,8 @@ struct smr_cmd_hdr { uint64_t entry; uint64_t tx_ctx; uint64_t rx_ctx; - int64_t id; + int64_t rx_id; + int64_t tx_id; uint32_t op; uint16_t proto; uint16_t op_flags; @@ -191,12 +192,19 @@ struct smr_cmd { #define SMR_NAME_MAX 256 #define SMR_PATH_MAX (SMR_NAME_MAX + sizeof(SMR_DIR)) +enum smr_sar_status { + SMR_SAR_FREE = 0, + SMR_SAR_BUSY, + SMR_SAR_READY, +}; + struct smr_peer_data { int64_t id; - uint32_t sar_status; - uint16_t name_sent; - uint16_t ipc_valid; uintptr_t local_region; + uint8_t sar_status; + uint8_t name_sent; + uint8_t ipc_valid; + uint8_t resv[5]; struct ofi_xpmem_client xpmem; }; @@ -320,13 +328,13 @@ struct smr_ep { struct ofi_bufpool *pend_pool; struct slist overflow_list; - struct dlist_entry ipc_cpy_pend_list; + struct dlist_entry async_cpy_list; size_t min_multi_recv_size; int ep_idx; enum ofi_shm_p2p_type p2p_type; void *dsa_context; - void (*smr_progress_ipc_list)(struct smr_ep *ep); + void (*smr_progress_async_list)(struct smr_ep *ep); }; struct smr_av { @@ -430,8 +438,8 @@ static inline uintptr_t smr_peer_to_owner(struct smr_ep *ep, static inline void smr_return_cmd(struct smr_ep *ep, struct smr_cmd *cmd) { - struct smr_region *peer_smr = smr_peer_region(ep, cmd->hdr.id); - uintptr_t peer_ptr = smr_peer_to_owner(ep, cmd->hdr.id, (uintptr_t) cmd); + struct smr_region *peer_smr = smr_peer_region(ep, cmd->hdr.rx_id); + uintptr_t peer_ptr; int64_t pos; struct smr_return_entry *queue_entry; int ret; @@ -443,8 +451,10 @@ static inline void smr_return_cmd(struct smr_ep *ep, struct smr_cmd *cmd) assert(0); } + peer_ptr = smr_peer_to_owner(ep, cmd->hdr.rx_id, (uintptr_t) cmd); assert(peer_ptr >= (uintptr_t) peer_smr->base_addr && - peer_ptr < (uintptr_t) peer_smr->base_addr + peer_smr->total_size); + peer_ptr < (uintptr_t) peer_smr->base_addr + + peer_smr->total_size); queue_entry->ptr = peer_ptr; smr_return_queue_commit(queue_entry, pos); @@ -485,25 +495,31 @@ int smr_query_atomic(struct fid_domain *domain, enum fi_datatype datatype, enum fi_op op, struct fi_atomic_attr *attr, uint64_t flags); +enum { + SMR_TX_ENTRY, + SMR_RX_ENTRY, +}; + struct smr_pend_entry { + struct dlist_entry entry; union { struct { -
int64_t peer_id; - void *context; - uint64_t op_flags; - } tx; - struct { - struct dlist_entry entry; - struct smr_cmd *cmd; struct fi_peer_rx_entry *rx_entry; struct ofi_mr_entry *ipc_entry; ofi_hmem_async_event_t async_event; } rx; }; + uint8_t type; + struct smr_cmd *cmd; struct iovec iov[SMR_IOV_LIMIT]; size_t iov_count; struct ofi_mr *mr[SMR_IOV_LIMIT]; size_t bytes_done; + void *comp_ctx; + uint64_t comp_flags; + int sar_dir; + ssize_t (*sar_copy_fn)(struct smr_ep *ep, + struct smr_pend_entry *pend); }; struct smr_cmd_ctx { @@ -571,23 +587,20 @@ int64_t smr_verify_peer(struct smr_ep *ep, fi_addr_t fi_addr); void smr_format_tx_pend(struct smr_pend_entry *pend, void *context, struct ofi_mr **mr, const struct iovec *iov, - uint32_t iov_count, uint64_t op_flags, int64_t id); -void smr_generic_format(struct smr_cmd *cmd, int64_t peer_id, uint32_t op, - uint64_t tag, uint64_t data, uint64_t op_flags); -size_t smr_copy_to_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, - struct smr_cmd *cmd, struct ofi_mr **mr, - const struct iovec *iov, size_t count, - size_t *bytes_done); -size_t smr_copy_from_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, - struct smr_cmd *cmd, struct ofi_mr **mr, - const struct iovec *iov, size_t count, - size_t *bytes_done); + uint32_t iov_count, uint64_t op_flags); +void smr_generic_format(struct smr_cmd *cmd, int64_t tx_id, int64_t rx_id, + uint32_t op, uint64_t tag, uint64_t data, + uint64_t op_flags); +size_t smr_copy_to_sar(struct smr_ep *ep, struct smr_region *smr, + struct smr_pend_entry *pend); +size_t smr_copy_from_sar(struct smr_ep *ep, struct smr_region *smr, + struct smr_pend_entry *pend); int smr_select_proto(void **desc, size_t iov_count, bool cma_avail, bool ipc_valid, uint32_t op, uint64_t total_len, uint64_t op_flags); typedef ssize_t (*smr_send_func)( struct smr_ep *ep, struct smr_region *peer_smr, - int64_t id, int64_t peer_id, uint32_t op, uint64_t tag, + int64_t tx_id, int64_t rx_id, uint32_t op, uint64_t tag, uint64_t data, uint64_t op_flags, struct ofi_mr **desc, const struct iovec *iov, size_t iov_count, size_t total_len, void *context, struct smr_cmd *cmd); @@ -639,23 +652,28 @@ static inline bool smr_ipc_valid(struct smr_ep *ep, struct smr_region *peer_smr, smr_peer_data(peer_smr)[peer_id].ipc_valid); } +static inline struct smr_freestack *smr_pend_sar_pool( + struct smr_ep *ep, struct smr_pend_entry *pend) +{ + if (pend->type == SMR_TX_ENTRY) + return smr_sar_pool(ep->region); + return smr_sar_pool(smr_peer_region(ep, pend->cmd->hdr.rx_id)); +} + int smr_unexp_start(struct fi_peer_rx_entry *rx_entry); -void smr_progress_ipc_list(struct smr_ep *ep); -static inline void smr_progress_ipc_list_noop(struct smr_ep *ep) +void smr_progress_async_list(struct smr_ep *ep); +static inline void smr_progress_async_list_noop(struct smr_ep *ep) { // noop } +ssize_t smr_copy_sar(struct smr_ep *ep, struct smr_pend_entry *pend); + /* SMR FUNCTIONS FOR DSA SUPPORT */ void smr_dsa_init(void); void smr_dsa_cleanup(void); -size_t smr_dsa_copy_to_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, - struct smr_cmd *cmd, const struct iovec *iov, - size_t count, size_t *bytes_done); -size_t smr_dsa_copy_from_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, - struct smr_cmd *cmd, const struct iovec *iov, - size_t count, size_t *bytes_done); +ssize_t smr_dsa_copy_sar(struct smr_ep *ep, struct smr_pend_entry *pend); void smr_dsa_context_init(struct smr_ep *ep); void smr_dsa_context_cleanup(struct smr_ep *ep); void smr_dsa_progress(struct smr_ep 
*ep); diff --git a/prov/shm/src/smr_atomic.c b/prov/shm/src/smr_atomic.c index 0b2bc9fa696..188f96bbca4 100644 --- a/prov/shm/src/smr_atomic.c +++ b/prov/shm/src/smr_atomic.c @@ -57,12 +57,12 @@ static void smr_format_inline_atomic(struct smr_cmd *cmd, struct ofi_mr **mr, } static void smr_do_atomic_inline(struct smr_ep *ep, struct smr_region *peer_smr, - int64_t id, int64_t peer_id, uint32_t op, + int64_t tx_id, int64_t rx_id, uint32_t op, uint64_t op_flags, uint8_t datatype, uint8_t atomic_op, struct ofi_mr **desc, const struct iovec *iov, size_t iov_count, size_t total_len, struct smr_cmd *cmd) { - smr_generic_format(cmd, peer_id, op, 0, 0, op_flags); + smr_generic_format(cmd, tx_id, rx_id, op, 0, 0, op_flags); smr_generic_atomic_format(cmd, datatype, atomic_op); smr_format_inline_atomic(cmd, desc, iov, iov_count); } @@ -114,7 +114,7 @@ static void smr_format_inject_atomic( static ssize_t smr_do_atomic_inject( struct smr_ep *ep, struct smr_region *peer_smr, - int64_t id, int64_t peer_id, uint32_t op, + int64_t tx_id, int64_t rx_id, uint32_t op, uint64_t op_flags, uint8_t datatype, uint8_t atomic_op, struct ofi_mr **desc, const struct iovec *iov, size_t iov_count, struct ofi_mr **res_desc, @@ -125,7 +125,7 @@ static ssize_t smr_do_atomic_inject( { struct smr_pend_entry *pend; - smr_generic_format(cmd, peer_id, op, 0, 0, op_flags); + smr_generic_format(cmd, tx_id, rx_id, op, 0, 0, op_flags); smr_generic_atomic_format(cmd, datatype, atomic_op); smr_format_inject_atomic(cmd, desc, iov, iov_count, resultv, result_count, comp_desc, compv, comp_count, @@ -137,7 +137,7 @@ static ssize_t smr_do_atomic_inject( assert(pend); cmd->hdr.tx_ctx = (uintptr_t) pend; smr_format_tx_pend(pend, context, res_desc, resultv, - result_count, op_flags, id); + result_count, op_flags); } else { cmd->hdr.tx_ctx = 0; } @@ -173,7 +173,7 @@ static ssize_t smr_generic_atomic( struct iovec compare_iov[SMR_IOV_LIMIT]; struct iovec result_iov[SMR_IOV_LIMIT]; uint16_t smr_flags = 0; - int64_t id, peer_id, pos; + int64_t tx_id, rx_id, pos; int proto; ssize_t ret = 0; size_t total_len; @@ -183,14 +183,14 @@ static ssize_t smr_generic_atomic( assert(compare_count <= SMR_IOV_LIMIT); assert(rma_count <= SMR_IOV_LIMIT); - id = smr_verify_peer(ep, addr); - if (id < 0) + tx_id = smr_verify_peer(ep, addr); + if (tx_id < 0) return -FI_EAGAIN; - peer_id = smr_peer_data(ep->region)[id].id; - peer_smr = smr_peer_region(ep, id); + rx_id = smr_peer_data(ep->region)[tx_id].id; + peer_smr = smr_peer_region(ep, tx_id); - if (smr_peer_data(ep->region)[id].sar_status) + if (smr_peer_data(ep->region)[tx_id].sar_status) return -FI_EAGAIN; ret = smr_cmd_queue_next(smr_cmd_queue(peer_smr), &ce, &pos); @@ -230,8 +230,8 @@ static ssize_t smr_generic_atomic( if (proto == smr_proto_inline) { cmd = &ce->cmd; - ce->ptr = smr_peer_to_peer(ep, id, (uintptr_t) cmd); - smr_do_atomic_inline(ep, peer_smr, id, peer_id, ofi_op_atomic, + ce->ptr = smr_peer_to_peer(ep, tx_id, (uintptr_t) cmd); + smr_do_atomic_inline(ep, peer_smr, tx_id, rx_id, ofi_op_atomic, op_flags, datatype, atomic_op, (struct ofi_mr **) desc, iov, count, total_len, cmd); @@ -244,8 +244,8 @@ static ssize_t smr_generic_atomic( cmd = smr_freestack_pop(smr_cmd_stack(ep->region)); assert(cmd); - ce->ptr = smr_local_to_peer(ep, id, peer_id, (uintptr_t) cmd); - ret = smr_do_atomic_inject(ep, peer_smr, id, peer_id, op, + ce->ptr = smr_local_to_peer(ep, tx_id, rx_id, (uintptr_t) cmd); + ret = smr_do_atomic_inject(ep, peer_smr, tx_id, rx_id, op, op_flags, datatype, atomic_op, (struct ofi_mr **) desc, 
iov, count, (struct ofi_mr **) result_desc, result_iov, diff --git a/prov/shm/src/smr_av.c b/prov/shm/src/smr_av.c index df775e08253..6ba26d81eaa 100644 --- a/prov/shm/src/smr_av.c +++ b/prov/shm/src/smr_av.c @@ -97,7 +97,7 @@ static fi_addr_t smr_get_addr(struct fi_peer_rx_entry *rx_entry) av = container_of(cmd_ctx->ep->util_ep.av, struct smr_av, util_av); - return av->smr_map.peers[cmd_ctx->cmd->hdr.id].fiaddr; + return av->smr_map.peers[cmd_ctx->cmd->hdr.rx_id].fiaddr; } static int smr_av_insert(struct fid_av *av_fid, const void *addr, size_t count, diff --git a/prov/shm/src/smr_dsa.c b/prov/shm/src/smr_dsa.c index e6bc17b0cee..e33896b5844 100644 --- a/prov/shm/src/smr_dsa.c +++ b/prov/shm/src/smr_dsa.c @@ -60,13 +60,9 @@ struct dsa_bitmap { struct dsa_cmd_context { size_t bytes_in_progress; - int index; + struct smr_pend_entry *pend; int batch_size; - int dir; - uint32_t op; - // We keep track of the entry type to know which lock to acquire - // when we need to do the updates after completion - void *entry_ptr; + int index; }; struct wq_handle { @@ -270,12 +266,10 @@ static int dsa_idxd_init_wq_array(int shared, int numa_node, static struct accfg_ctx *ctx; struct accfg_wq *wq; void *wq_reg; - int fd; enum accfg_device_state dstate; enum accfg_wq_state wstate; enum accfg_wq_type type; - int mode; - int wq_count = 0; + int mode, fd, wq_count = 0; struct accfg_device *device; bool wq_mmap_support = true; bool wq_write_support = false; @@ -299,14 +293,14 @@ static int dsa_idxd_init_wq_array(int shared, int numa_node, (*dsa_ops.accfg_device_get_numa_node)(device) != numa_node) continue; - dsa_foreach_wq(device, wq) - { + dsa_foreach_wq(device, wq) { /* Get a workqueue that's enabled */ wstate = (*dsa_ops.accfg_wq_get_state)(wq); if (wstate != ACCFG_WQ_ENABLED) continue; - if ((*dsa_ops.accfg_wq_get_max_transfer_size)(wq) < SMR_SAR_SIZE) + if ((*dsa_ops.accfg_wq_get_max_transfer_size)(wq) < + SMR_SAR_SIZE) continue; /* The wq type should be user */ @@ -322,8 +316,8 @@ static int dsa_idxd_init_wq_array(int shared, int numa_node, /* This is a candidate wq */ FI_DBG(&smr_prov, FI_LOG_EP_CTRL, - "DSA WQ: %s\n", - (*dsa_ops.accfg_wq_get_devname)(wq)); + "DSA WQ: %s\n", + (*dsa_ops.accfg_wq_get_devname)(wq)); fd = -1; wq_reg = NULL; @@ -367,7 +361,6 @@ static int dsa_idxd_init_wq_array(int shared, int numa_node, assert(wq_count == 0); } - (*dsa_ops.accfg_unref)(ctx); return wq_count; } @@ -404,52 +397,51 @@ static int dsa_bitmap_is_empty(struct dsa_bitmap *bitmap) } static struct dsa_cmd_context * -dsa_allocate_cmd_context(struct smr_dsa_context *smr_dsa_context) +dsa_allocate_cmd_context(struct smr_dsa_context *dsa_ctx) { - struct dsa_cmd_context *dsa_cmd_context; + struct dsa_cmd_context *cmd_ctx; int i; for (i = 0; i < CMD_CONTEXT_COUNT; i++) { - if (!dsa_bitmap_test_and_set_bit(&smr_dsa_context->dsa_bitmap, i)) + if (!dsa_bitmap_test_and_set_bit(&dsa_ctx->dsa_bitmap, i)) break; } if (i == CMD_CONTEXT_COUNT) return NULL; - dsa_cmd_context = &smr_dsa_context->dsa_cmd_context[i]; - memset(dsa_cmd_context, 0, sizeof(*dsa_cmd_context)); - dsa_cmd_context->index = i; + cmd_ctx = &dsa_ctx->dsa_cmd_context[i]; + memset(cmd_ctx, 0, sizeof(*cmd_ctx)); + cmd_ctx->index = i; - return dsa_cmd_context; + return cmd_ctx; } -static void dsa_free_cmd_context(struct dsa_cmd_context *dsa_cmd_context, - struct smr_dsa_context *smr_dsa_context) +static void dsa_free_cmd_context(struct dsa_cmd_context *cmd_ctx, + struct smr_dsa_context *dsa_ctx) { - dsa_bitmap_clear_bit(&smr_dsa_context->dsa_bitmap, - 
dsa_cmd_context->index); + dsa_bitmap_clear_bit(&dsa_ctx->dsa_bitmap, cmd_ctx->index); } static struct dsa_hw_desc * -dsa_get_work_descriptor_array_ptr(struct dsa_cmd_context *dsa_cmd_context, - struct smr_dsa_context *dsa_context) +dsa_get_work_descriptor_array_ptr(struct dsa_cmd_context *cmd_ctx, + struct smr_dsa_context *dsa_ctx) { - return &dsa_context->dsa_work_desc[dsa_cmd_context->index * + return &dsa_ctx->dsa_work_desc[cmd_ctx->index * MAX_CMD_BATCH_SIZE]; } static struct dsa_hw_desc * -dsa_get_free_work_descriptor(struct dsa_cmd_context *dsa_cmd_context, - struct smr_dsa_context *dsa_context) +dsa_get_free_work_descriptor(struct dsa_cmd_context *cmd_ctx, + struct smr_dsa_context *dsa_ctx) { struct dsa_hw_desc *free_desc; struct dsa_completion_record *free_comp; - free_desc = &dsa_context->dsa_work_desc[dsa_cmd_context->index * - MAX_CMD_BATCH_SIZE + dsa_cmd_context->batch_size]; - free_comp = &dsa_context->dsa_work_comp[dsa_cmd_context->index * - MAX_CMD_BATCH_SIZE + dsa_cmd_context->batch_size++]; + free_desc = &dsa_ctx->dsa_work_desc[cmd_ctx->index * + MAX_CMD_BATCH_SIZE + cmd_ctx->batch_size]; + free_comp = &dsa_ctx->dsa_work_comp[cmd_ctx->index * + MAX_CMD_BATCH_SIZE + cmd_ctx->batch_size++]; memset(free_desc, 0, sizeof(*free_desc)); memset(free_comp, 0, sizeof(*free_comp)); @@ -466,8 +458,8 @@ dsa_get_work_completion_array_ptr(struct dsa_cmd_context *dsa_cmd_context, MAX_CMD_BATCH_SIZE]; } -static struct dsa_cmd_context *dsa_get_cmd_context(struct smr_dsa_context - *dsa_context, int index) +static struct dsa_cmd_context *dsa_get_cmd_context( + struct smr_dsa_context *dsa_context, int index) { if (dsa_bitmap_test_bit(&dsa_context->dsa_bitmap, index)) return &dsa_context->dsa_cmd_context[index]; @@ -497,8 +489,9 @@ static void dsa_touch_buffer_pages(struct dsa_hw_desc *desc) *dst_addr = *dst_addr; } - // Touch last byte in case start of buffer is not aligned to page - // boundary + /* Touch last byte in case start of buffer is not aligned to page + * boundary + */ src_addr = (char *)desc->src_addr + (desc->xfer_size - 1); dst_addr = (char *)desc->dst_addr + (desc->xfer_size - 1); @@ -519,44 +512,51 @@ static void dsa_prepare_copy_desc(struct dsa_hw_desc *desc, desc->dst_addr = dst_addr; } -static void smr_dsa_copy_sar(struct smr_freestack *sar_pool, - struct smr_dsa_context *dsa_context, - struct dsa_cmd_context *dsa_cmd_context, - struct smr_cmd *cmd, const struct iovec *iov, - size_t count, size_t *bytes_done, - struct smr_region *region) +ssize_t smr_dsa_copy_sar(struct smr_ep *ep, struct smr_pend_entry *pend) { + struct smr_dsa_context *dsa_ctx = ep->dsa_context; + struct dsa_cmd_context *cmd_ctx; + struct smr_region *peer_smr; + struct smr_freestack *sar_pool; struct smr_sar_buf *smr_sar_buf; - size_t remaining_sar_size; - size_t remaining_iov_size; - size_t iov_len; - size_t iov_index = 0; - int sar_index = 0; - int cmd_index = 0; - size_t iov_offset = *bytes_done; - size_t sar_offset = 0; - size_t cmd_size = 0; - char *iov_buf = NULL; - char *sar_buf = NULL; + size_t remaining_sar_size, remaining_iov_size, iov_len, iov_index; + size_t iov_offset, sar_offset, cmd_size, dsa_bytes_pending = 0; + int sar_index = 0, cmd_index = 0; + char *iov_buf = NULL, *sar_buf = NULL; struct dsa_hw_desc *desc = NULL; - size_t dsa_bytes_pending = 0; - for (iov_index = 0; iov_index < count; iov_index++) { - iov_len = iov[iov_index].iov_len; + assert(smr_env.use_dsa_sar); + + if (pend->type == SMR_RX_ENTRY) { + peer_smr = smr_peer_region(ep, pend->cmd->hdr.rx_id); + if 
(smr_peer_data(peer_smr)[pend->cmd->hdr.tx_id].sar_status != + SMR_SAR_READY) + return -FI_EAGAIN; + } cmd_ctx = dsa_allocate_cmd_context(ep->dsa_context); + if (!cmd_ctx) + return -FI_ENOMEM; + + cmd_ctx->pend = pend; + + iov_offset = pend->bytes_done; + for (iov_index = 0; iov_index < pend->iov_count; iov_index++) { + iov_len = pend->iov[iov_index].iov_len; if (iov_offset < iov_len) break; iov_offset -= iov_len; } - while ((iov_index < count) && - (sar_index < cmd->msg.data.buf_batch_size) && + sar_pool = smr_pend_sar_pool(ep, pend); + while ((iov_index < pend->iov_count) && + (sar_index < pend->cmd->data.buf_batch_size) && (cmd_index < MAX_CMD_BATCH_SIZE)) { smr_sar_buf = smr_freestack_get_entry_from_index( - sar_pool, cmd->msg.data.sar[sar_index]); - iov_len = iov[iov_index].iov_len; + sar_pool, pend->cmd->data.sar[sar_index]); + iov_len = pend->iov[iov_index].iov_len; - iov_buf = (char *)iov[iov_index].iov_base + iov_offset; + iov_buf = (char *)pend->iov[iov_index].iov_base + iov_offset; sar_buf = (char *)smr_sar_buf->buf + sar_offset; remaining_sar_size = SMR_SAR_SIZE - sar_offset; @@ -564,17 +564,16 @@ static void smr_dsa_copy_sar(struct smr_freestack *sar_pool, cmd_size = MIN(remaining_iov_size, remaining_sar_size); assert(cmd_size > 0); - desc = dsa_get_free_work_descriptor(dsa_cmd_context, - dsa_context); + desc = dsa_get_free_work_descriptor(cmd_ctx, ep->dsa_context); - if (dsa_cmd_context->dir == OFI_COPY_BUF_TO_IOV) + if (pend->sar_dir == OFI_COPY_BUF_TO_IOV) dsa_prepare_copy_desc(desc, cmd_size, (uintptr_t) - sar_buf, (uintptr_t) iov_buf); + sar_buf, (uintptr_t) iov_buf); else dsa_prepare_copy_desc(desc, cmd_size, (uintptr_t) - iov_buf, (uintptr_t) sar_buf); + iov_buf, (uintptr_t) sar_buf); - dsa_desc_submit(dsa_context, desc); + dsa_desc_submit(ep->dsa_context, desc); cmd_index++; dsa_bytes_pending += cmd_size; @@ -596,11 +595,13 @@ static void smr_dsa_copy_sar(struct smr_freestack *sar_pool, } assert(dsa_bytes_pending > 0); - dsa_cmd_context->bytes_in_progress = dsa_bytes_pending; - dsa_context->copy_type_stats[dsa_cmd_context->dir]++; - dsa_cmd_context->op = cmd->msg.hdr.op; -} + cmd_ctx->bytes_in_progress = dsa_bytes_pending; + dsa_ctx->copy_type_stats[pend->sar_dir]++; + /* FI_EBUSY indicates command was issued successfully but contents are + * not ready yet */ + return -FI_EBUSY; +} static void dsa_process_partially_completed_desc(struct smr_dsa_context *dsa_context, @@ -636,55 +637,57 @@ dsa_process_partially_completed_desc(struct smr_dsa_context *dsa_context, dsa_desc_submit(dsa_context, dsa_descriptor); } -static void dsa_update_tx_entry(struct smr_region *smr, - struct dsa_cmd_context *dsa_cmd_context) +static void dsa_complete_tx_work(struct smr_ep *ep, struct smr_pend_entry *pend) { - struct smr_cmd *cmd; - struct smr_tx_entry *tx_entry = dsa_cmd_context->entry_ptr; + int ret; - tx_entry->bytes_done += dsa_cmd_context->bytes_in_progress; - cmd = &tx_entry->cmd; - //resp = smr_get_ptr(smr, cmd->msg.hdr.src_data); + if (pend->bytes_done == pend->cmd->hdr.size && + pend->cmd->hdr.op == ofi_op_read_req) { + ret = smr_complete_tx(ep, pend->comp_ctx, pend->cmd->hdr.op, + pend->comp_flags); + if (ret) + FI_WARN(&smr_prov, FI_LOG_EP_CTRL, + "unable to process tx completion\n"); - // assert(resp->status == SMR_STATUS_BUSY); - // resp->status = (dsa_cmd_context->dir == OFI_COPY_IOV_TO_BUF ?
- // SMR_STATUS_SAR_FULL : SMR_STATUS_SAR_EMPTY); + smr_peer_data(ep->region)[pend->cmd->hdr.tx_id].sar_status = + SMR_SAR_FREE; + ofi_buf_free(pend); + return; + } + + smr_peer_data(ep->region)[pend->cmd->hdr.tx_id].sar_status = + SMR_SAR_READY; } -static void dsa_update_sar_entry(struct smr_region *smr, - struct dsa_cmd_context *dsa_cmd_context) +static void dsa_complete_rx_work(struct smr_ep *ep, struct smr_pend_entry *pend) { - struct smr_pend_entry *sar_entry = dsa_cmd_context->entry_ptr; - struct smr_region *peer_smr; - struct smr_cmd *cmd; - - sar_entry->bytes_done += dsa_cmd_context->bytes_in_progress; - cmd = &sar_entry->cmd; - peer_smr = smr_peer_region(smr, cmd->msg.hdr.id); - // resp = smr_get_ptr(peer_smr, cmd->msg.hdr.src_data); + int ret; - // assert(resp->status == SMR_STATUS_BUSY); - // resp->status = (dsa_cmd_context->dir == OFI_COPY_IOV_TO_BUF ? - // SMR_STATUS_SAR_FULL : SMR_STATUS_SAR_EMPTY); + if (pend->bytes_done == pend->cmd->hdr.size) { + ret = smr_complete_rx(ep, pend->comp_ctx, pend->cmd->hdr.op, + pend->comp_flags, pend->bytes_done, + pend->iov[0].iov_base, + pend->cmd->hdr.rx_id, pend->cmd->hdr.tag, + pend->cmd->hdr.cq_data); + if (ret) { + FI_WARN(&smr_prov, FI_LOG_EP_CTRL, + "unable to process rx completion\n"); + } + } + smr_return_cmd(ep, pend->cmd); } -static void dsa_process_complete_work(struct smr_region *smr, - struct dsa_cmd_context *dsa_cmd_context, - struct smr_dsa_context *dsa_context) +static void dsa_process_complete_work(struct smr_ep *ep, + struct dsa_cmd_context *cmd_ctx) { - if (dsa_cmd_context->op == ofi_op_read_req) { - if (dsa_cmd_context->dir == OFI_COPY_BUF_TO_IOV) - dsa_update_tx_entry(smr, dsa_cmd_context); - else - dsa_update_sar_entry(smr, dsa_cmd_context); - } else { - if (dsa_cmd_context->dir == OFI_COPY_IOV_TO_BUF) - dsa_update_tx_entry(smr, dsa_cmd_context); - else - dsa_update_sar_entry(smr, dsa_cmd_context); - } + cmd_ctx->pend->bytes_done += cmd_ctx->bytes_in_progress; - dsa_free_cmd_context(dsa_cmd_context, dsa_context); + if (cmd_ctx->pend->type == SMR_RX_ENTRY) + dsa_complete_rx_work(ep, cmd_ctx->pend); + else + dsa_complete_tx_work(ep, cmd_ctx->pend); + + dsa_free_cmd_context(cmd_ctx, ep->dsa_context); } static inline void @@ -698,23 +701,22 @@ dsa_page_fault_debug_info(struct dsa_cmd_context *dsa_cmd_context, !(dsa_work_comp->status & DSA_COMP_STATUS_WRITE), dsa_work_comp->status & DSA_COMP_STATUS_WRITE, (void *)dsa_work_comp->fault_addr, - dsa_cmd_context->dir, dsa_cmd_context->index); + dsa_cmd_context->pend->sar_dir, dsa_cmd_context->index); } -static bool dsa_check_cmd_status(struct smr_dsa_context *dsa_context, - struct dsa_cmd_context *dsa_cmd_context) +static bool dsa_check_cmd_status(struct smr_dsa_context *dsa_ctx, + struct dsa_cmd_context *cmd_ctx) { int i; struct dsa_hw_desc *dsa_work; struct dsa_completion_record *dsa_work_comp; bool dsa_cmd_completed = true; uint8_t status_value = 0; - dsa_work = dsa_get_work_descriptor_array_ptr(dsa_cmd_context, - dsa_context); - dsa_work_comp = - dsa_get_work_completion_array_ptr(dsa_cmd_context, dsa_context); - for (i = 0; i < dsa_cmd_context->batch_size; i++) { + dsa_work = dsa_get_work_descriptor_array_ptr(cmd_ctx, dsa_ctx); + dsa_work_comp = dsa_get_work_completion_array_ptr(cmd_ctx, dsa_ctx); + + for (i = 0; i < cmd_ctx->batch_size; i++) { status_value = dsa_work_comp[i].status & DSA_COMP_STATUS_MASK; switch (status_value) { @@ -724,17 +726,17 @@ static bool dsa_check_cmd_status(struct smr_dsa_context *dsa_context, dsa_cmd_completed = false; break; case 
DSA_COMP_PAGE_FAULT_NOBOF: - dsa_page_fault_debug_info(dsa_cmd_context, - &dsa_work_comp[i]); - dsa_process_partially_completed_desc(dsa_context, - &dsa_work[i]); - dsa_context->page_fault_stats[dsa_cmd_context->dir]++; + dsa_page_fault_debug_info(cmd_ctx, + &dsa_work_comp[i]); + dsa_process_partially_completed_desc(dsa_ctx, + &dsa_work[i]); + dsa_ctx->page_fault_stats[cmd_ctx->pend->sar_dir]++; dsa_cmd_completed = false; break; default: FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "Unhandled status codes: 0x%x\n", - status_value); + "Unhandled status codes: 0x%x\n", + status_value); assert(0); } } @@ -909,7 +911,7 @@ void smr_dsa_context_init(struct smr_ep *ep) if (!ep->dsa_context) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "aligned_alloc failed for dsa_context\n"); + "aligned_alloc failed for dsa_context\n"); goto alloc_error; } @@ -920,7 +922,7 @@ void smr_dsa_context_init(struct smr_ep *ep) if (wq_count == 0) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "error: wq mmap and wq write not supported\n"); + "error: wq mmap and wq write not supported\n"); goto wq_get_error; } @@ -933,7 +935,7 @@ void smr_dsa_context_init(struct smr_ep *ep) dsa_context->wq_count = wq_count; FI_DBG(&smr_prov, FI_LOG_EP_CTRL, "Numa node of endpoint CPU: %d\n", - numa_node); + numa_node); return; wq_get_error: @@ -972,68 +974,24 @@ void smr_dsa_context_cleanup(struct smr_ep *ep) void smr_dsa_progress(struct smr_ep *ep) { int index; - struct dsa_cmd_context *dsa_cmd_context; + struct dsa_cmd_context *cmd_ctx; bool dsa_cmd_completed; struct smr_dsa_context *dsa_context = ep->dsa_context; if (!dsa_is_work_in_progress(ep->dsa_context)) return; - pthread_spin_lock(&ep->region->lock); for (index = 0; index < CMD_CONTEXT_COUNT; index++) { - dsa_cmd_context = dsa_get_cmd_context(dsa_context, index); + cmd_ctx = dsa_get_cmd_context(dsa_context, index); - if (!dsa_cmd_context) + if (!cmd_ctx) continue; - dsa_cmd_completed = dsa_check_cmd_status(dsa_context, - dsa_cmd_context); + dsa_cmd_completed = dsa_check_cmd_status(dsa_context, cmd_ctx); if (dsa_cmd_completed) - dsa_process_complete_work(ep->region, dsa_cmd_context, - dsa_context); + dsa_process_complete_work(ep, cmd_ctx); } - pthread_spin_unlock(&ep->region->lock); -} - -size_t smr_dsa_copy_to_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, - struct smr_cmd *cmd, const struct iovec *iov, size_t count, - size_t *bytes_done) -{ - struct dsa_cmd_context *dsa_cmd_context; - - assert(smr_env.use_dsa_sar); - - dsa_cmd_context = dsa_allocate_cmd_context(ep->dsa_context); - if (!dsa_cmd_context) - return -FI_ENOMEM; - - dsa_cmd_context->dir = OFI_COPY_IOV_TO_BUF; - dsa_cmd_context->entry_ptr = entry_ptr; - smr_dsa_copy_sar(sar_pool, ep->dsa_context, dsa_cmd_context, cmd, iov, - count, bytes_done, ep->region); - - return FI_SUCCESS; -} - -size_t smr_dsa_copy_from_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, - struct smr_cmd *cmd, const struct iovec *iov, size_t count, - size_t *bytes_done) -{ - struct dsa_cmd_context *dsa_cmd_context; - - assert(smr_env.use_dsa_sar); - - dsa_cmd_context = dsa_allocate_cmd_context(ep->dsa_context); - if (!dsa_cmd_context) - return -FI_ENOMEM; - - dsa_cmd_context->dir = OFI_COPY_BUF_TO_IOV; - dsa_cmd_context->entry_ptr = entry_ptr; - smr_dsa_copy_sar(sar_pool, ep->dsa_context, dsa_cmd_context, cmd, iov, - count, bytes_done, ep->region); - - return FI_SUCCESS; } #else @@ -1041,16 +999,7 @@ size_t smr_dsa_copy_from_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, void smr_dsa_init(void) {} void smr_dsa_cleanup(void) {} -size_t 
smr_dsa_copy_to_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, - struct smr_cmd *cmd, const struct iovec *iov, size_t count, - size_t *bytes_done) -{ - return -FI_ENOSYS; -} - -size_t smr_dsa_copy_from_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, - struct smr_cmd *cmd, const struct iovec *iov, size_t count, - size_t *bytes_done) +ssize_t smr_dsa_copy_sar(struct smr_ep *ep, struct smr_pend_entry *pend) { return -FI_ENOSYS; } diff --git a/prov/shm/src/smr_ep.c b/prov/shm/src/smr_ep.c index 1a2e9325316..93fc7d696d2 100644 --- a/prov/shm/src/smr_ep.c +++ b/prov/shm/src/smr_ep.c @@ -177,7 +177,7 @@ static void smr_send_name(struct smr_ep *ep, int64_t id) ce->ptr = smr_peer_to_peer(ep, id, (uintptr_t) &ce->cmd); ce->cmd.hdr.op = SMR_OP_MAX + ofi_ctrl_connreq; - ce->cmd.hdr.id = id; + ce->cmd.hdr.tx_id = id; ce->cmd.hdr.cq_data = ep->region->pid; @@ -216,11 +216,11 @@ int64_t smr_verify_peer(struct smr_ep *ep, fi_addr_t fi_addr) void smr_format_tx_pend(struct smr_pend_entry *pend, void *context, struct ofi_mr **mr, const struct iovec *iov, - uint32_t iov_count, uint64_t op_flags, int64_t id) + uint32_t iov_count, uint64_t op_flags) { - pend->tx.context = context; - pend->tx.peer_id = id; - pend->tx.op_flags = op_flags; + pend->type = SMR_TX_ENTRY; + pend->comp_ctx = context; + pend->comp_flags = op_flags; memcpy(pend->iov, iov, sizeof(*iov) * iov_count); pend->iov_count = iov_count; @@ -231,16 +231,19 @@ void smr_format_tx_pend(struct smr_pend_entry *pend, void *context, else memset(pend->mr, 0, sizeof(*mr) * iov_count); + } -void smr_generic_format(struct smr_cmd *cmd, int64_t peer_id, uint32_t op, - uint64_t tag, uint64_t data, uint64_t op_flags) +void smr_generic_format(struct smr_cmd *cmd, int64_t tx_id, int64_t rx_id, + uint32_t op, uint64_t tag, uint64_t data, + uint64_t op_flags) { cmd->hdr.op = op; cmd->hdr.status = 0; cmd->hdr.op_flags = 0; cmd->hdr.tag = tag; - cmd->hdr.id = peer_id; + cmd->hdr.tx_id = tx_id; + cmd->hdr.rx_id = rx_id; cmd->hdr.cq_data = data; cmd->hdr.rx_ctx = 0; @@ -312,54 +315,22 @@ static int smr_format_ipc(struct smr_cmd *cmd, void *ptr, size_t len, return FI_SUCCESS; } -size_t smr_copy_to_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, - struct smr_cmd *cmd, struct ofi_mr **mr, - const struct iovec *iov, size_t count, - size_t *bytes_done) +ssize_t smr_copy_sar(struct smr_ep *ep, struct smr_pend_entry *pend) { + struct smr_freestack *sar_pool; struct smr_sar_buf *sar_buf; int next_sar_buf = 0; - if (smr_env.use_dsa_sar && ofi_mr_all_host(mr, count)) - return smr_dsa_copy_to_sar(ep, sar_pool, cmd, iov, count, - bytes_done); - - while ((*bytes_done < cmd->hdr.size) && - (next_sar_buf < cmd->data.buf_batch_size)) { + sar_pool = smr_pend_sar_pool(ep, pend); + while (pend->bytes_done < pend->cmd->hdr.size && + next_sar_buf < pend->cmd->data.buf_batch_size) { sar_buf = smr_freestack_get_entry_from_index( - sar_pool, cmd->data.sar[next_sar_buf]); - - *bytes_done += ofi_copy_from_mr_iov( - sar_buf->buf, SMR_SAR_SIZE, mr, iov, count, - *bytes_done); - - next_sar_buf++; - } - - return FI_SUCCESS; -} - -size_t smr_copy_from_sar(struct smr_ep *ep, struct smr_freestack *sar_pool, - struct smr_cmd *cmd, struct ofi_mr **mr, - const struct iovec *iov, size_t count, - size_t *bytes_done) -{ - struct smr_sar_buf *sar_buf; - int next_sar_buf = 0; - - //set copy functions in sar entry? 
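With this change both single-direction SAR helpers collapse into one entry point that reads its direction from the pending entry (pend->sar_dir feeding the newly exported ofi_copy_mr_iov), which also answers the "set copy functions in sar entry?" note above. Below is a self-contained sketch of that direction-parameterized copy pattern; the enum, types, and iovec walk are illustrative stand-ins, not the libfabric definitions:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Stand-ins for OFI_COPY_IOV_TO_BUF / OFI_COPY_BUF_TO_IOV. */
enum copy_dir { COPY_IOV_TO_BUF, COPY_BUF_TO_IOV };

/* One direction-parameterized copy instead of mirrored to/from helpers:
 * callers stash the direction once and every copy site stays identical. */
static size_t copy_iov(const struct iovec *iov, size_t iov_count,
                       size_t offset, void *buf, size_t size,
                       enum copy_dir dir)
{
        size_t done = 0, len, i;

        for (i = 0; i < iov_count && done < size; i++) {
                if (offset >= iov[i].iov_len) {
                        offset -= iov[i].iov_len;
                        continue;
                }
                len = iov[i].iov_len - offset;
                if (len > size - done)
                        len = size - done;
                if (dir == COPY_IOV_TO_BUF)
                        memcpy((char *) buf + done,
                               (char *) iov[i].iov_base + offset, len);
                else
                        memcpy((char *) iov[i].iov_base + offset,
                               (char *) buf + done, len);
                done += len;
                offset = 0;
        }
        return done;
}

int main(void)
{
        char src[] = "sar payload", dst[sizeof(src)] = {0};
        struct iovec iov = { .iov_base = src, .iov_len = sizeof(src) };
        char bounce[sizeof(src)];

        copy_iov(&iov, 1, 0, bounce, sizeof(bounce), COPY_IOV_TO_BUF);
        iov.iov_base = dst;
        copy_iov(&iov, 1, 0, bounce, sizeof(bounce), COPY_BUF_TO_IOV);
        printf("%s\n", dst); /* prints: sar payload */
        return 0;
}

Folding the direction into the pending state is what lets every call site, CPU and DSA alike, shrink to a single pend->sar_copy_fn(ep, pend) dispatch.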
- if (smr_env.use_dsa_sar && ofi_mr_all_host(mr, count)) - return smr_dsa_copy_to_sar(ep, sar_pool, cmd, iov, count, - bytes_done); - - while ((*bytes_done < cmd->hdr.size) && - (next_sar_buf < cmd->data.buf_batch_size)) { - sar_buf = smr_freestack_get_entry_from_index( - sar_pool, cmd->data.sar[next_sar_buf]); - - *bytes_done += ofi_copy_to_mr_iov(mr, iov, count, *bytes_done, - sar_buf->buf, SMR_SAR_SIZE); + sar_pool, pend->cmd->data.sar[next_sar_buf]); + pend->bytes_done += ofi_copy_mr_iov( + pend->mr, pend->iov, pend->iov_count, + pend->bytes_done, sar_buf->buf, + SMR_SAR_SIZE, pend->sar_dir); next_sar_buf++; } @@ -370,12 +341,12 @@ static int smr_format_sar(struct smr_ep *ep, struct smr_cmd *cmd, struct ofi_mr **mr, const struct iovec *iov, size_t count, size_t total_len, struct smr_region *smr, struct smr_region *peer_smr, - int64_t id, struct smr_pend_entry *pending) + struct smr_pend_entry *pend) { int i, ret; if (ep->region->max_sar_buf_per_peer == 0 || - smr_peer_data(ep->region)[id].sar_status) + smr_peer_data(ep->region)[cmd->hdr.tx_id].sar_status) return -FI_EAGAIN; cmd->data.buf_batch_size = MIN( @@ -393,14 +364,15 @@ static int smr_format_sar(struct smr_ep *ep, struct smr_cmd *cmd, cmd->hdr.proto = smr_proto_sar; cmd->hdr.size = total_len; + //probably remove this - we should never have a 0 byte SAR now that + //injects can be delivery complete /* Nothing to copy for 0 byte transfer */ - if (!cmd->hdr.size) - goto out; + // if (!cmd->hdr.size) + // goto out; if (cmd->hdr.op != ofi_op_read_req) { - ret = smr_copy_to_sar(ep, smr_sar_pool(ep->region), cmd, - mr, iov, count, &pending->bytes_done); - if (ret < 0) { + ret = pend->sar_copy_fn(ep, pend); + if (ret < 0 && ret != -FI_EBUSY) { for (i = cmd->data.buf_batch_size - 1; i >= 0; i--) { smr_freestack_push_by_index( smr_sar_pool(ep->region), @@ -408,9 +380,13 @@ static int smr_format_sar(struct smr_ep *ep, struct smr_cmd *cmd, } return -FI_EAGAIN; } + smr_peer_data(ep->region)[cmd->hdr.tx_id].sar_status = + SMR_SAR_BUSY; + } else { + smr_peer_data(ep->region)[cmd->hdr.tx_id].sar_status = + SMR_SAR_READY; } -out: - smr_peer_data(smr)[id].sar_status = FI_EBUSY; + return FI_SUCCESS; } @@ -479,21 +455,21 @@ int smr_select_proto(void **desc, size_t iov_count, bool vma_avail, } static ssize_t smr_do_inline(struct smr_ep *ep, struct smr_region *peer_smr, - int64_t id, int64_t peer_id, uint32_t op, + int64_t tx_id, int64_t rx_id, uint32_t op, uint64_t tag, uint64_t data, uint64_t op_flags, struct ofi_mr **desc, const struct iovec *iov, size_t iov_count, size_t total_len, void *context, struct smr_cmd *cmd) { cmd->hdr.tx_ctx = 0; - smr_generic_format(cmd, peer_id, op, tag, data, op_flags); + smr_generic_format(cmd, tx_id, rx_id, op, tag, data, op_flags); smr_format_inline(cmd, desc, iov, iov_count); return FI_SUCCESS; } static ssize_t smr_do_inject(struct smr_ep *ep, struct smr_region *peer_smr, - int64_t id, int64_t peer_id, uint32_t op, + int64_t tx_id, int64_t rx_id, uint32_t op, uint64_t tag, uint64_t data, uint64_t op_flags, struct ofi_mr **desc, const struct iovec *iov, size_t iov_count, size_t total_len, void *context, @@ -507,19 +483,19 @@ static ssize_t smr_do_inject(struct smr_ep *ep, struct smr_region *peer_smr, cmd->hdr.tx_ctx = (uintptr_t) pend; smr_format_tx_pend(pend, context, desc, iov, iov_count, - op_flags, id); + op_flags); } else { cmd->hdr.tx_ctx = 0; } - smr_generic_format(cmd, peer_id, op, tag, data, op_flags); + smr_generic_format(cmd, tx_id, rx_id, op, tag, data, op_flags); smr_format_inject(cmd, desc, iov, 
iov_count, ep->region); return FI_SUCCESS; } static ssize_t smr_do_iov(struct smr_ep *ep, struct smr_region *peer_smr, - int64_t id, int64_t peer_id, uint32_t op, + int64_t tx_id, int64_t rx_id, uint32_t op, uint64_t tag, uint64_t data, uint64_t op_flags, struct ofi_mr **desc, const struct iovec *iov, size_t iov_count, size_t total_len, void *context, @@ -531,16 +507,16 @@ static ssize_t smr_do_iov(struct smr_ep *ep, struct smr_region *peer_smr, assert(pend); cmd->hdr.tx_ctx = (uintptr_t) pend; - smr_generic_format(cmd, peer_id, op, tag, data, op_flags); + smr_generic_format(cmd, tx_id, rx_id, op, tag, data, op_flags); smr_format_iov(cmd, iov, iov_count, total_len, ep->region); - smr_format_tx_pend(pend, context, desc, iov, iov_count, op_flags, id); + smr_format_tx_pend(pend, context, desc, iov, iov_count, op_flags); return FI_SUCCESS; } static ssize_t smr_do_sar(struct smr_ep *ep, struct smr_region *peer_smr, - int64_t id, int64_t peer_id, uint32_t op, + int64_t tx_id, int64_t rx_id, uint32_t op, uint64_t tag, uint64_t data, uint64_t op_flags, struct ofi_mr **desc, const struct iovec *iov, size_t iov_count, size_t total_len, void *context, @@ -553,21 +529,26 @@ static ssize_t smr_do_sar(struct smr_ep *ep, struct smr_region *peer_smr, assert(pend); cmd->hdr.tx_ctx = (uintptr_t) pend; - smr_format_tx_pend(pend, context, desc, iov, iov_count, op_flags, id); + smr_format_tx_pend(pend, context, desc, iov, iov_count, op_flags); + pend->sar_dir = pend->cmd->hdr.op == ofi_op_read_req ? + OFI_COPY_BUF_TO_IOV : OFI_COPY_IOV_TO_BUF; + + if (smr_env.use_dsa_sar && ofi_mr_all_host(pend->mr, pend->iov_count)) + pend->sar_copy_fn = &smr_dsa_copy_sar; + else + pend->sar_copy_fn = &smr_copy_sar; - smr_generic_format(cmd, peer_id, op, tag, data, op_flags); + smr_generic_format(cmd, tx_id, rx_id, op, tag, data, op_flags); ret = smr_format_sar(ep, cmd, desc, iov, iov_count, total_len, - ep->region, peer_smr, id, pend); - if (ret) { + ep->region, peer_smr, pend); + if (ret) ofi_buf_free(pend); - return ret; - } - return FI_SUCCESS; + return ret; } static ssize_t smr_do_ipc(struct smr_ep *ep, struct smr_region *peer_smr, - int64_t id, int64_t peer_id, uint32_t op, + int64_t tx_id, int64_t rx_id, uint32_t op, uint64_t tag, uint64_t data, uint64_t op_flags, struct ofi_mr **desc, const struct iovec *iov, size_t iov_count, size_t total_len, void *context, @@ -580,7 +561,7 @@ static ssize_t smr_do_ipc(struct smr_ep *ep, struct smr_region *peer_smr, assert(pend); cmd->hdr.tx_ctx = (uintptr_t) pend; - smr_generic_format(cmd, peer_id, op, tag, data, op_flags); + smr_generic_format(cmd, tx_id, rx_id, op, tag, data, op_flags); assert(iov_count == 1 && desc && desc[0]); ret = smr_format_ipc(cmd, iov[0].iov_base, total_len, ep->region, desc[0]->iface, desc[0]->device); @@ -590,12 +571,12 @@ static ssize_t smr_do_ipc(struct smr_ep *ep, struct smr_region *peer_smr, "unable to use IPC for msg, " "fallback to using SAR\n"); ofi_buf_free(pend); - return smr_do_sar(ep, peer_smr, id, peer_id, op, tag, data, + return smr_do_sar(ep, peer_smr, tx_id, rx_id, op, tag, data, op_flags, desc, iov, iov_count, total_len, context, cmd); } - smr_format_tx_pend(pend, context, desc, iov, iov_count, op_flags, id); + smr_format_tx_pend(pend, context, desc, iov, iov_count, op_flags); return FI_SUCCESS; } @@ -808,10 +789,15 @@ static int smr_ep_ctrl(struct fid *fid, int command, void *arg) ep->region->flags |= SMR_FLAG_CMA_INIT; } - if (ofi_hmem_any_ipc_enabled()) - ep->smr_progress_ipc_list = smr_progress_ipc_list; - else - 
ep->smr_progress_ipc_list = smr_progress_ipc_list_noop; + if (ofi_hmem_any_ipc_enabled()) { + ep->smr_progress_async_list = smr_progress_async_list; + } else { +#if SHM_HAVE_DSA + ep->smr_progress_async_list = smr_progress_async_list; +#else + ep->smr_progress_async_list = smr_progress_async_list_noop; +#endif + } if (!ep->srx) { domain = container_of(ep->util_ep.domain, @@ -979,7 +965,7 @@ int smr_endpoint(struct fid_domain *domain, struct fi_info *info, if (ret) goto ep; - dlist_init(&ep->ipc_cpy_pend_list); + dlist_init(&ep->async_cpy_list); slist_init(&ep->overflow_list); ep->min_multi_recv_size = SMR_INJECT_SIZE; diff --git a/prov/shm/src/smr_msg.c b/prov/shm/src/smr_msg.c index e46c284ce59..3581a2ab752 100644 --- a/prov/shm/src/smr_msg.c +++ b/prov/shm/src/smr_msg.c @@ -79,7 +79,7 @@ static ssize_t smr_generic_sendmsg(struct smr_ep *ep, const struct iovec *iov, uint64_t op_flags) { struct smr_region *peer_smr; - int64_t id, peer_id, pos; + int64_t tx_id, rx_id, pos; ssize_t ret = 0; size_t total_len; int proto; @@ -88,14 +88,14 @@ static ssize_t smr_generic_sendmsg(struct smr_ep *ep, const struct iovec *iov, assert(iov_count <= SMR_IOV_LIMIT); - id = smr_verify_peer(ep, addr); - if (id < 0) + tx_id = smr_verify_peer(ep, addr); + if (tx_id < 0) return -FI_EAGAIN; - peer_id = smr_peer_data(ep->region)[id].id; - peer_smr = smr_peer_region(ep, id); + rx_id = smr_peer_data(ep->region)[tx_id].id; + peer_smr = smr_peer_region(ep, tx_id); - if (smr_peer_data(ep->region)[id].sar_status) + if (smr_peer_data(ep->region)[tx_id].sar_status) return -FI_EAGAIN; ret = smr_cmd_queue_next(smr_cmd_queue(peer_smr), &ce, &pos); @@ -108,7 +108,7 @@ static ssize_t smr_generic_sendmsg(struct smr_ep *ep, const struct iovec *iov, assert(!(op_flags & FI_INJECT) || total_len <= SMR_INJECT_SIZE); proto = smr_select_proto(desc, iov_count, smr_vma_enabled(ep, peer_smr), - smr_ipc_valid(ep, peer_smr, id, peer_id), op, + smr_ipc_valid(ep, peer_smr, tx_id, rx_id), op, total_len, op_flags); if (proto != smr_proto_inline) { @@ -120,13 +120,13 @@ static ssize_t smr_generic_sendmsg(struct smr_ep *ep, const struct iovec *iov, cmd = smr_freestack_pop(smr_cmd_stack(ep->region)); assert(cmd); - ce->ptr = smr_local_to_peer(ep, id, peer_id, (uintptr_t) cmd); + ce->ptr = smr_local_to_peer(ep, tx_id, rx_id, (uintptr_t) cmd); } else { cmd = &ce->cmd; - ce->ptr = smr_peer_to_peer(ep, id, (uintptr_t) &ce->cmd); + ce->ptr = smr_peer_to_peer(ep, tx_id, (uintptr_t) &ce->cmd); } - ret = smr_send_ops[proto](ep, peer_smr, id, peer_id, op, tag, data, + ret = smr_send_ops[proto](ep, peer_smr, tx_id, rx_id, op, tag, data, op_flags, (struct ofi_mr **) desc, iov, iov_count, total_len, context, cmd); if (ret) { @@ -197,7 +197,7 @@ static ssize_t smr_generic_inject(struct fid_ep *ep_fid, const void *buf, { struct smr_ep *ep; struct smr_region *peer_smr; - int64_t id, peer_id, pos; + int64_t tx_id, rx_id, pos; ssize_t ret = 0; struct iovec msg_iov; int proto; @@ -211,15 +211,15 @@ static ssize_t smr_generic_inject(struct fid_ep *ep_fid, const void *buf, ep = container_of(ep_fid, struct smr_ep, util_ep.ep_fid.fid); - id = smr_verify_peer(ep, dest_addr); - if (id < 0) + tx_id = smr_verify_peer(ep, dest_addr); + if (tx_id < 0) return -FI_EAGAIN; - peer_id = smr_peer_data(ep->region)[id].id; - peer_smr = smr_peer_region(ep, id); + rx_id = smr_peer_data(ep->region)[tx_id].id; + peer_smr = smr_peer_region(ep, tx_id); ofi_genlock_lock(&ep->util_ep.lock); - if (smr_peer_data(ep->region)[id].sar_status) { + if 
(smr_peer_data(ep->region)[tx_id].sar_status) { ret = -FI_EAGAIN; goto unlock; } @@ -230,12 +230,12 @@ static ssize_t smr_generic_inject(struct fid_ep *ep_fid, const void *buf, goto unlock; } - ce->ptr = smr_peer_to_peer(ep, id, (uintptr_t) &ce->cmd); + ce->ptr = smr_peer_to_peer(ep, tx_id, (uintptr_t) &ce->cmd); if (len <= SMR_MSG_DATA_LEN) { proto = smr_proto_inline; cmd = &ce->cmd; - ce->ptr = smr_peer_to_peer(ep, id, (uintptr_t) &ce->cmd); + ce->ptr = smr_peer_to_peer(ep, tx_id, (uintptr_t) &ce->cmd); } else { proto = smr_proto_inject; if (smr_freestack_isempty(smr_cmd_stack(ep->region))) { @@ -246,10 +246,10 @@ static ssize_t smr_generic_inject(struct fid_ep *ep_fid, const void *buf, cmd = smr_freestack_pop(smr_cmd_stack(ep->region)); assert(cmd); - ce->ptr = smr_local_to_peer(ep, id, peer_id, (uintptr_t) cmd); + ce->ptr = smr_local_to_peer(ep, tx_id, rx_id, (uintptr_t) cmd); } - ret = smr_send_ops[proto](ep, peer_smr, id, peer_id, op, tag, data, + ret = smr_send_ops[proto](ep, peer_smr, tx_id, rx_id, op, tag, data, op_flags, NULL, &msg_iov, 1, len, NULL, cmd); if (ret) { smr_cmd_queue_discard(ce, pos); diff --git a/prov/shm/src/smr_progress.c b/prov/shm/src/smr_progress.c index ea2c01b3a73..d82936d81f0 100644 --- a/prov/shm/src/smr_progress.c +++ b/prov/shm/src/smr_progress.c @@ -35,7 +35,6 @@ static void smr_progress_overflow(struct smr_ep *ep) { struct smr_cmd_entry *ce; - struct smr_pend_entry *pending; struct smr_region *peer_smr; struct smr_cmd *cmd; int64_t pos; @@ -45,14 +44,13 @@ static void smr_progress_overflow(struct smr_ep *ep) entry = ep->overflow_list.head; while (entry) { cmd = (struct smr_cmd *) entry; - pending = (struct smr_pend_entry *) cmd->hdr.tx_ctx; - peer_smr = smr_peer_region(ep, pending->tx.peer_id); + peer_smr = smr_peer_region(ep, cmd->hdr.tx_id); ret = smr_cmd_queue_next(smr_cmd_queue(peer_smr), &ce, &pos); if (ret == -FI_ENOENT) return; - ce->ptr = smr_local_to_peer(ep, pending->tx.peer_id, - cmd->hdr.id, (uintptr_t) cmd); + ce->ptr = smr_local_to_peer(ep, cmd->hdr.tx_id, + cmd->hdr.rx_id, (uintptr_t) cmd); slist_remove_head(&ep->overflow_list); smr_cmd_queue_commit(ce, pos); @@ -78,7 +76,7 @@ static inline void smr_free_sar_bufs(struct smr_ep *ep, struct smr_cmd *cmd, smr_freestack_push_by_index(smr_sar_pool(ep->region), cmd->data.sar[i]); } - smr_peer_data(ep->region)[pending->tx.peer_id].sar_status = 0; + smr_peer_data(ep->region)[cmd->hdr.tx_id].sar_status = SMR_SAR_FREE; } static int smr_progress_return_entry(struct smr_ep *ep, struct smr_cmd *cmd, @@ -102,14 +100,15 @@ static int smr_progress_return_entry(struct smr_ep *ep, struct smr_cmd *cmd, } if (cmd->hdr.op == ofi_op_read_req) { - ret = smr_copy_from_sar(ep, smr_sar_pool(ep->region), - cmd, pending->mr, pending->iov, - pending->iov_count, - &pending->bytes_done); + ret = pending->sar_copy_fn(ep, pending); + if (ret && ret != -FI_EBUSY) + return ret; if (pending->bytes_done == cmd->hdr.size) { smr_free_sar_bufs(ep, cmd, pending); return FI_SUCCESS; } + smr_peer_data(ep->region)[cmd->hdr.tx_id].sar_status = + SMR_SAR_BUSY; smr_try_send_cmd(ep, cmd); return -FI_EAGAIN; } @@ -119,10 +118,12 @@ static int smr_progress_return_entry(struct smr_ep *ep, struct smr_cmd *cmd, return FI_SUCCESS; } - ret = smr_copy_to_sar(ep, smr_sar_pool(ep->region), cmd, - pending->mr, pending->iov, - pending->iov_count, &pending->bytes_done); + ret = pending->sar_copy_fn(ep, pending); + if (ret && ret != -FI_EBUSY) + return ret; + smr_peer_data(ep->region)[cmd->hdr.tx_id].sar_status = + SMR_SAR_BUSY; 
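The hunks above and below complete the per-peer SAR gate: senders back off with -FI_EAGAIN while sar_status is anything but SMR_SAR_FREE, the transmit side marks the slot SMR_SAR_BUSY once a copy is outstanding, and smr_free_sar_bufs() releases it back to SMR_SAR_FREE. Here is a minimal stand-alone model of that tri-state handshake, with a plain errno value in place of -FI_EAGAIN:

#include <assert.h>
#include <errno.h>
#include <stdio.h>

/* Stand-in for the per-peer SAR handshake this series introduces:
 * at most one in-flight SAR transfer per peer, gated by a tri-state flag. */
enum sar_status { SAR_FREE, SAR_READY, SAR_BUSY };

struct peer { enum sar_status sar_status; };

/* Mirrors the -FI_EAGAIN gate in smr_generic_sendmsg()/smr_generic_rma(). */
static int try_start_sar(struct peer *p)
{
        if (p->sar_status != SAR_FREE)
                return -EAGAIN; /* previous SAR transfer still owns the slot */
        p->sar_status = SAR_BUSY; /* copy submitted, peer must drain it */
        return 0;
}

/* Mirrors smr_free_sar_bufs() handing the slot back after completion. */
static void finish_sar(struct peer *p)
{
        assert(p->sar_status != SAR_FREE);
        p->sar_status = SAR_FREE;
}

int main(void)
{
        struct peer p = { SAR_FREE };

        assert(try_start_sar(&p) == 0);
        assert(try_start_sar(&p) == -EAGAIN); /* second send must back off */
        finish_sar(&p);
        assert(try_start_sar(&p) == 0);
        puts("sar gate ok");
        return 0;
}

Replacing the old 0/FI_EBUSY flag with named states also makes the ready-but-not-busy case representable: SMR_SAR_READY, which the format path uses for read requests that have not yet copied anything.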
smr_try_send_cmd(ep, cmd); return -FI_EAGAIN; case smr_proto_inject: @@ -172,7 +173,6 @@ static void smr_progress_return(struct smr_ep *ep) int64_t pos; int ret; - ofi_genlock_lock(&ep->util_ep.lock); while (1) { ret = smr_return_queue_head(smr_return_queue(ep->region), &queue_entry, &pos); @@ -188,15 +188,15 @@ static void smr_progress_return(struct smr_ep *ep) if (cmd->hdr.status) { ret = smr_write_err_comp( ep->util_ep.tx_cq, - pending->tx.context, - pending->tx.op_flags, + pending->comp_ctx, + pending->comp_flags, cmd->hdr.tag, cmd->hdr.status); } else { ret = smr_complete_tx( - ep, pending->tx.context, + ep, pending->comp_ctx, cmd->hdr.op, - pending->tx.op_flags); + pending->comp_flags); } if (ret) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, @@ -210,7 +210,6 @@ static void smr_progress_return(struct smr_ep *ep) smr_return_queue_release(smr_return_queue(ep->region), queue_entry, pos); } - ofi_genlock_unlock(&ep->util_ep.lock); } ssize_t smr_progress_inline(struct smr_ep *ep, struct smr_cmd *cmd, @@ -243,7 +242,7 @@ ssize_t smr_progress_inject(struct smr_ep *ep, struct smr_cmd *cmd, struct smr_inject_buf *tx_buf; ssize_t ret; - peer_smr = smr_peer_region(ep, cmd->hdr.id); + peer_smr = smr_peer_region(ep, cmd->hdr.rx_id); tx_buf = smr_get_inject_buf(peer_smr, cmd); if (cmd->hdr.op == ofi_op_read_req) { @@ -278,9 +277,9 @@ ssize_t smr_progress_iov(struct smr_ep *ep, struct smr_cmd *cmd, struct ofi_xpmem_client *xpmem; int ret; - peer_smr = smr_peer_region(ep, cmd->hdr.id); + peer_smr = smr_peer_region(ep, cmd->hdr.rx_id); - xpmem = &smr_peer_data(ep->region)[cmd->hdr.id].xpmem; + xpmem = &smr_peer_data(ep->region)[cmd->hdr.rx_id].xpmem; ret = ofi_shm_p2p_copy(ep->p2p_type, iov, iov_count, cmd->data.iov, cmd->data.iov_count, cmd->hdr.size, @@ -301,7 +300,7 @@ static void smr_buffer_sar(struct smr_ep *ep, struct smr_pend_entry *sar_entry, size_t bytes; int next_buf = 0; - peer_smr = smr_peer_region(ep, cmd->hdr.id); + peer_smr = smr_peer_region(ep, cmd->hdr.rx_id); while (next_buf < cmd->data.buf_batch_size && sar_entry->bytes_done < cmd->hdr.size) { @@ -327,65 +326,59 @@ static void smr_buffer_sar(struct smr_ep *ep, struct smr_pend_entry *sar_entry, } } +static void smr_try_copy_rx_sar(struct smr_ep *ep, struct smr_pend_entry *pend) +{ + ssize_t ret; + + ret = pend->sar_copy_fn(ep, pend); + if (ret) { + if (ret == -FI_EAGAIN) { + dlist_insert_tail(&ep->async_cpy_list, &pend->entry); + return; + } + /* -FI_EBUSY indicates full copy was submitted */ + if (ret != -FI_EBUSY) + pend->cmd->hdr.status = ret; + } +} + static int smr_progress_pending_sar(struct smr_ep *ep, struct smr_cmd *cmd) { - struct smr_pend_entry *sar_entry; - struct smr_region *peer_smr; - void *comp_ctx; - uint64_t comp_flags; + struct smr_pend_entry *pend; int ret; - sar_entry = (struct smr_pend_entry *) cmd->hdr.rx_ctx; - if (sar_entry->rx.rx_entry && sar_entry->rx.rx_entry->peer_context) { - smr_buffer_sar(ep, sar_entry, cmd); + pend = (struct smr_pend_entry *) cmd->hdr.rx_ctx; + if (pend->rx.rx_entry && pend->rx.rx_entry->peer_context) { + smr_buffer_sar(ep, pend, cmd); goto out; } - peer_smr = smr_peer_region(ep, cmd->hdr.id); + smr_try_copy_rx_sar(ep, pend); - if (cmd->hdr.op == ofi_op_read_req) - ret = smr_copy_to_sar(ep, smr_sar_pool(peer_smr), cmd, - sar_entry->mr, sar_entry->iov, - sar_entry->iov_count, - &sar_entry->bytes_done); - else - ret = smr_copy_from_sar(ep, smr_sar_pool(peer_smr), cmd, - sar_entry->mr, sar_entry->iov, - sar_entry->iov_count, - &sar_entry->bytes_done); - if (ret) - cmd->hdr.status = ret; - - if 
(sar_entry->bytes_done == cmd->hdr.size || ret) { - if (sar_entry->rx.rx_entry) { - comp_ctx = sar_entry->rx.rx_entry->context; - comp_flags = smr_rx_cq_flags( - sar_entry->rx.rx_entry->flags, - cmd->hdr.op_flags); - } else { - comp_ctx = NULL; - comp_flags = smr_rx_cq_flags(0, cmd->hdr.op_flags); - } - if (ret) { + if (pend->bytes_done == cmd->hdr.size || pend->cmd->hdr.status) { + if (pend->cmd->hdr.status) { ret = smr_write_err_comp(ep->util_ep.rx_cq, - comp_ctx, comp_flags, - cmd->hdr.tag, ret); + pend->comp_ctx, + pend->comp_flags, + cmd->hdr.tag, + pend->cmd->hdr.status); } else { - ret = smr_complete_rx(ep, comp_ctx, - cmd->hdr.op, comp_flags, - sar_entry->bytes_done, - sar_entry->iov[0].iov_base, - cmd->hdr.id, cmd->hdr.tag, + ret = smr_complete_rx(ep, pend->comp_ctx, + cmd->hdr.op, + pend->comp_flags, + pend->bytes_done, + pend->iov[0].iov_base, + cmd->hdr.rx_id, cmd->hdr.tag, cmd->hdr.cq_data); } if (ret) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "unable to process rx completion\n"); } - if (sar_entry->rx.rx_entry) - ep->srx->owner_ops->free_entry(sar_entry->rx.rx_entry); + if (pend->rx.rx_entry) + ep->srx->owner_ops->free_entry(pend->rx.rx_entry); - ofi_buf_free(sar_entry); + ofi_buf_free(pend); } out: @@ -403,18 +396,44 @@ static int smr_progress_pending(struct smr_ep *ep, struct smr_cmd *cmd) } } +static void smr_init_rx_pend(struct smr_pend_entry *pend, struct smr_cmd *cmd, + struct fi_peer_rx_entry *rx_entry, + struct ofi_mr **mr, struct iovec *iov, + size_t iov_count) +{ + pend->type = SMR_RX_ENTRY; + if (rx_entry) { + pend->comp_ctx = rx_entry->context; + pend->comp_flags = smr_rx_cq_flags(rx_entry->flags, + cmd->hdr.op_flags); + } else { + pend->comp_ctx = NULL; + pend->comp_flags = smr_rx_cq_flags(0, cmd->hdr.op_flags); + } + + pend->cmd = cmd; + + pend->sar_dir = pend->cmd->hdr.op == ofi_op_read_req ? 
+ OFI_COPY_IOV_TO_BUF : OFI_COPY_BUF_TO_IOV; + + pend->bytes_done = 0; + memcpy(pend->iov, iov, sizeof(*iov) * iov_count); + pend->iov_count = iov_count; + pend->rx.rx_entry = rx_entry; + if (mr) + memcpy(pend->mr, mr, sizeof(*mr) * iov_count); + else + memset(pend->mr, 0, sizeof(*mr) * iov_count); +} + ssize_t smr_progress_sar(struct smr_ep *ep, struct smr_cmd *cmd, struct fi_peer_rx_entry *rx_entry, struct ofi_mr **mr, struct iovec *iov, size_t iov_count) { - struct smr_region *peer_smr; - struct smr_pend_entry *sar_entry = NULL; + struct smr_pend_entry *pend = NULL; struct iovec sar_iov[SMR_IOV_LIMIT]; - size_t bytes_done = 0; ssize_t ret = FI_SUCCESS; - peer_smr = smr_peer_region(ep, cmd->hdr.id); - /* Nothing to do for 0 byte transfer */ if (!cmd->hdr.size) goto out; @@ -422,28 +441,23 @@ ssize_t smr_progress_sar(struct smr_ep *ep, struct smr_cmd *cmd, memcpy(sar_iov, iov, sizeof(*iov) * iov_count); (void) ofi_truncate_iov(sar_iov, &iov_count, cmd->hdr.size); - if (cmd->hdr.op == ofi_op_read_req) - ret = smr_copy_to_sar(ep, smr_sar_pool(peer_smr), cmd, - mr, sar_iov, iov_count, &bytes_done); - else - ret = smr_copy_from_sar(ep, smr_sar_pool(peer_smr), cmd, - mr, sar_iov, iov_count, &bytes_done); - cmd->hdr.status = ret; - - if (bytes_done == cmd->hdr.size) - goto out; + pend = ofi_buf_alloc(ep->pend_pool); + assert(pend); - sar_entry = ofi_buf_alloc(ep->pend_pool); + cmd->hdr.rx_ctx = (uintptr_t) pend; - cmd->hdr.rx_ctx = (uintptr_t) sar_entry; - sar_entry->bytes_done = bytes_done; - memcpy(sar_entry->iov, sar_iov, sizeof(*sar_iov) * iov_count); - sar_entry->iov_count = iov_count; - sar_entry->rx.rx_entry = rx_entry; - if (mr) - memcpy(sar_entry->mr, mr, sizeof(*mr) * iov_count); + smr_init_rx_pend(pend, cmd, rx_entry, mr, sar_iov, iov_count); + if (smr_env.use_dsa_sar && ofi_mr_all_host(pend->mr, pend->iov_count)) + pend->sar_copy_fn = &smr_dsa_copy_sar; else - memset(sar_entry->mr, 0, sizeof(*mr) * iov_count); + pend->sar_copy_fn = &smr_copy_sar; + + smr_try_copy_rx_sar(ep, pend); + + if (pend->bytes_done == cmd->hdr.size) { + cmd->hdr.rx_ctx = 0; + ofi_buf_free(pend); + } out: smr_return_cmd(ep, cmd); return ret; @@ -464,12 +478,8 @@ static int smr_ipc_async_copy(struct smr_ep *ep, struct smr_cmd *cmd, return -FI_ENOMEM; cmd->hdr.rx_ctx = (uintptr_t) ipc_entry; + smr_init_rx_pend(ipc_entry, cmd, rx_entry, NULL, iov, iov_count); ipc_entry->rx.ipc_entry = mr_entry; - ipc_entry->bytes_done = 0; - ipc_entry->rx.cmd = cmd; - memcpy(ipc_entry->iov, iov, sizeof(*iov) * iov_count); - ipc_entry->iov_count = iov_count; - ipc_entry->rx.rx_entry = rx_entry; ret = ofi_create_async_copy_event(iface, device, &ipc_entry->rx.async_event); @@ -489,7 +499,7 @@ static int smr_ipc_async_copy(struct smr_ep *ep, struct smr_cmd *cmd, if (ret < 0) goto fail; - dlist_insert_tail(&ipc_entry->rx.entry, &ep->ipc_cpy_pend_list); + dlist_insert_tail(&ipc_entry->entry, &ep->async_cpy_list); return FI_SUCCESS; fail: @@ -511,10 +521,10 @@ ssize_t smr_progress_ipc(struct smr_ep *ep, struct smr_cmd *cmd, if (cmd->data.ipc_info.iface == FI_HMEM_ZE) ze_set_pid_fd((void **) &cmd->data.ipc_info.ipc_handle, - ep->map->peers[cmd->hdr.id].pid_fd); + ep->map->peers[cmd->hdr.rx_id].pid_fd); //TODO disable IPC if more than 1 interface is initialized - ret = ofi_ipc_cache_search(domain->ipc_cache, cmd->hdr.id, + ret = ofi_ipc_cache_search(domain->ipc_cache, cmd->hdr.rx_id, &cmd->data.ipc_info, &mr_entry); if (ret) goto out; @@ -638,7 +648,7 @@ static int smr_progress_inject_atomic(struct smr_cmd *cmd, struct ofi_mr **mr, 
uint8_t *src, *comp; int i; - tx_buf = smr_get_inject_buf(smr_peer_region(ep, cmd->hdr.id), cmd); + tx_buf = smr_get_inject_buf(smr_peer_region(ep, cmd->hdr.rx_id), cmd); if (err) goto out; @@ -701,7 +711,7 @@ static int smr_start_common(struct smr_ep *ep, struct smr_cmd *cmd, ret = smr_complete_rx(ep, rx_entry->context, cmd->hdr.op, comp_flags, cmd->hdr.size, comp_buf, - cmd->hdr.id, cmd->hdr.tag, + cmd->hdr.rx_id, cmd->hdr.tag, cmd->hdr.cq_data); } if (ret) { @@ -756,7 +766,7 @@ static int smr_copy_saved(struct smr_cmd_ctx *cmd_ctx, ret = smr_complete_rx(cmd_ctx->ep, rx_entry->context, cmd_ctx->cmd->hdr.op, comp_flags, bytes, rx_entry->iov[0].iov_base, - cmd_ctx->cmd->hdr.id, + cmd_ctx->cmd->hdr.rx_id, cmd_ctx->cmd->hdr.tag, cmd_ctx->cmd->hdr.cq_data); if (ret) { @@ -828,8 +838,8 @@ static void smr_progress_connreq(struct smr_ep *ep, struct smr_cmd *cmd) } smr_set_ipc_valid(ep, idx); - smr_peer_data(peer_smr)[cmd->hdr.id].id = idx; - smr_peer_data(ep->region)[idx].id = cmd->hdr.id; + smr_peer_data(peer_smr)[cmd->hdr.rx_id].id = idx; + smr_peer_data(ep->region)[idx].id = cmd->hdr.rx_id; assert(ep->map->num_peers > 0); ep->region->max_sar_buf_per_peer = MIN( @@ -914,7 +924,7 @@ static int smr_progress_cmd_msg(struct smr_ep *ep, struct smr_cmd *cmd) if (cmd->hdr.rx_ctx) return smr_progress_pending(ep, cmd); - attr.addr = ep->map->peers[cmd->hdr.id].fiaddr; + attr.addr = ep->map->peers[cmd->hdr.rx_id].fiaddr; attr.msg_size = cmd->hdr.size; attr.tag = cmd->hdr.tag; if (cmd->hdr.op == ofi_op_tagged) { @@ -997,7 +1007,7 @@ static int smr_progress_cmd_rma(struct smr_ep *ep, struct smr_cmd *cmd) assert(cmd->hdr.proto < smr_proto_max); ret = smr_progress_ops[cmd->hdr.proto](ep, cmd, NULL, mr, iov, iov_count); - if (ret) { + if (ret && ret != -FI_EBUSY) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "error processing rma op\n"); ret = smr_write_err_comp(ep->util_ep.rx_cq, NULL, @@ -1008,7 +1018,7 @@ static int smr_progress_cmd_rma(struct smr_ep *ep, struct smr_cmd *cmd) smr_rx_cq_flags(0, cmd->hdr.op_flags), cmd->hdr.size, iov_count ? iov[0].iov_base : NULL, - cmd->hdr.id, 0, cmd->hdr.cq_data); + cmd->hdr.rx_id, 0, cmd->hdr.cq_data); } if (ret) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, @@ -1080,7 +1090,7 @@ static int smr_progress_cmd_atomic(struct smr_ep *ep, struct smr_cmd *cmd) smr_rx_cq_flags(0, cmd->hdr.op_flags), total_len, ioc_count ? ioc[0].addr : NULL, - cmd->hdr.id, 0, cmd->hdr.cq_data); + cmd->hdr.rx_id, 0, cmd->hdr.cq_data); } if (ret) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, @@ -1099,19 +1109,6 @@ static void smr_progress_cmd(struct smr_ep *ep) int ret = 0; int64_t pos; - /* ep->util_ep.lock is used to serialize the message/tag matching. - * We keep the lock until the matching is complete. This will - * ensure that commands are matched in the order they are - * received, if there are multiple progress threads. - * - * This lock should be low cost because it's only used by this - * single process. It is also optimized to be a noop if - * multi-threading is disabled. - * - * Other processes are free to post on the queue without the need - * for locking the queue. 
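Note the return convention the copy callbacks establish for the progress paths above: 0 means the copy ran synchronously, -FI_EAGAIN means it could not start yet and should be retried, and -FI_EBUSY means the work was handed to an asynchronous engine (DSA) and completes later, which is why the RMA progress hunk treats -FI_EBUSY as success-in-flight rather than an error. A compact sketch of dispatching on that convention, with plain errno values standing in for the FI_* codes:

#include <errno.h>
#include <stdio.h>

/* 0: done synchronously; -EAGAIN: retry later; -EBUSY: submitted async. */
static int fake_copy(int behavior) { return behavior; }

static const char *drive_copy(int behavior)
{
        int ret = fake_copy(behavior);

        if (ret == 0)
                return "completed inline";
        if (ret == -EAGAIN)
                return "queued for retry";      /* e.g. async_cpy_list */
        if (ret == -EBUSY)
                return "submitted, completion pending";
        return "hard failure -> error completion";
}

int main(void)
{
        printf("%s\n", drive_copy(0));
        printf("%s\n", drive_copy(-EAGAIN));
        printf("%s\n", drive_copy(-EBUSY));
        printf("%s\n", drive_copy(-EIO));
        return 0;
}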
- */ - ofi_genlock_lock(&ep->util_ep.lock); while (1) { ret = smr_cmd_queue_head(smr_cmd_queue(ep->region), &ce, &pos); if (ret == -FI_ENOENT) @@ -1153,70 +1150,86 @@ static void smr_progress_cmd(struct smr_ep *ep) break; } } - ofi_genlock_unlock(&ep->util_ep.lock); } -void smr_progress_ipc_list(struct smr_ep *ep) +static void smr_progress_async_ipc(struct smr_ep *ep, + struct smr_pend_entry *ipc_entry) { - struct smr_pend_entry *ipc_entry; struct smr_domain *domain; enum fi_hmem_iface iface; - struct dlist_entry *tmp; uint64_t device; - uint64_t flags; - void *context; int ret; - + domain = container_of(ep->util_ep.domain, struct smr_domain, util_domain); - /* after the synchronize all operations should be complete */ - dlist_foreach_container_safe(&ep->ipc_cpy_pend_list, - struct smr_pend_entry, - ipc_entry, rx.entry, tmp) { - iface = ipc_entry->rx.cmd->data.ipc_info.iface; - device = ipc_entry->rx.cmd->data.ipc_info.device; - - if (ofi_async_copy_query(iface, ipc_entry->rx.async_event)) - continue; - - if (ipc_entry->rx.rx_entry) { - context = ipc_entry->rx.rx_entry->context; - flags = smr_rx_cq_flags( - ipc_entry->rx.rx_entry->flags, - ipc_entry->rx.cmd->hdr.op_flags); - } else { - context = NULL; - flags = smr_rx_cq_flags( - 0, ipc_entry->rx.cmd->hdr.op_flags); - } + iface = ipc_entry->cmd->data.ipc_info.iface; + device = ipc_entry->cmd->data.ipc_info.device; - ret = smr_complete_rx(ep, context, ipc_entry->rx.cmd->hdr.op, - flags, ipc_entry->rx.cmd->hdr.size, - ipc_entry->iov[0].iov_base, - ipc_entry->rx.cmd->hdr.id, - ipc_entry->rx.cmd->hdr.tag, - ipc_entry->rx.cmd->hdr.cq_data); - if (ret) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "unable to process rx completion\n"); - } + if (ofi_async_copy_query(iface, ipc_entry->rx.async_event)) + return; - /* indicate that the operation is completed only after we - * have confirmed that the write has finished. 
This is to - * ensure that the tx_complete occurs after the sending - * buffer is now free to be reused - */ + ofi_mr_cache_delete(domain->ipc_cache, ipc_entry->rx.ipc_entry); + ofi_free_async_copy_event(iface, device, ipc_entry->rx.async_event); - ofi_mr_cache_delete(domain->ipc_cache, ipc_entry->rx.ipc_entry); - ofi_free_async_copy_event(iface, device, - ipc_entry->rx.async_event); - dlist_remove(&ipc_entry->rx.entry); - if (ipc_entry->rx.rx_entry) - ep->srx->owner_ops->free_entry(ipc_entry->rx.rx_entry); - ofi_buf_free(ipc_entry); + ret = smr_complete_rx(ep, ipc_entry->comp_ctx, ipc_entry->cmd->hdr.op, + ipc_entry->comp_flags, ipc_entry->cmd->hdr.size, + ipc_entry->iov[0].iov_base, + ipc_entry->cmd->hdr.rx_id, + ipc_entry->cmd->hdr.tag, + ipc_entry->cmd->hdr.cq_data); + if (ret) { + FI_WARN(&smr_prov, FI_LOG_EP_CTRL, + "unable to process rx completion\n"); + } + if (ipc_entry->rx.rx_entry) + ep->srx->owner_ops->free_entry(ipc_entry->rx.rx_entry); + + smr_return_cmd(ep, ipc_entry->cmd); + dlist_remove(&ipc_entry->entry); + ofi_buf_free(ipc_entry); +} + +static void smr_progress_async_sar(struct smr_ep *ep, + struct smr_pend_entry *pend) +{ + ssize_t ret; - smr_return_cmd(ep, ipc_entry->rx.cmd); + ret = pend->sar_copy_fn(ep, pend); + if (ret) { + if (ret == -FI_EAGAIN) + return; + /* -FI_EBUSY indicates copy was submitted successfully */ + if (ret == -FI_EBUSY) { + dlist_remove(&pend->entry); + return; + } + pend->cmd->hdr.status = ret; + //TODO complete in error + } +} + +void smr_progress_async_list(struct smr_ep *ep) +{ + struct smr_pend_entry *async_entry; + struct dlist_entry *tmp; + + /* after the synchronize all operations should be complete */ + dlist_foreach_container_safe(&ep->async_cpy_list, + struct smr_pend_entry, + async_entry, entry, tmp) { + switch (async_entry->cmd->hdr.proto) { + case smr_proto_ipc: + smr_progress_async_ipc(ep, async_entry); + break; + case smr_proto_sar: + smr_progress_async_sar(ep, async_entry); + break; + default: + FI_WARN(&smr_prov, FI_LOG_EP_CTRL, + "unidentified operation type\n"); + assert(0); + } } } @@ -1226,6 +1239,20 @@ void smr_ep_progress(struct util_ep *util_ep) ep = container_of(util_ep, struct smr_ep, util_ep); + /* ep->util_ep.lock is used to serialize the message/tag matching. + * We keep the lock until the matching is complete. This will + * ensure that commands are matched in the order they are + * received, if there are multiple progress threads. + * + * This lock should be low cost because it's only used by this + * single process. It is also optimized to be a noop if + * multi-threading is disabled. + * + * Other processes are free to post on the queue without the need + * for locking the queue. 
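The serialization comment moves to smr_ep_progress() together with the lock itself: instead of smr_progress_cmd() and smr_progress_return() each taking ep->util_ep.lock, a single ofi_genlock acquisition now brackets DSA, command-queue, return-queue, and async-list progress, as the following hunk shows. A stand-alone pthread sketch of that lock coarsening; the drain functions are empty placeholders for the smr calls named in their comments:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ep_lock = PTHREAD_MUTEX_INITIALIZER;

static void drain_dsa(void)     { /* smr_dsa_progress() */ }
static void drain_cmds(void)    { /* smr_progress_cmd() */ }
static void drain_returns(void) { /* smr_progress_return() */ }
static void drain_async(void)   { /* smr_progress_async_list() */ }

/* One acquisition per pass keeps command matching ordered across progress
 * threads without re-locking around every queue. Only the local process
 * takes this lock; peers post to the shared queues without it. */
static void ep_progress(void)
{
        pthread_mutex_lock(&ep_lock);
        drain_dsa();
        drain_cmds();
        drain_returns();
        drain_async();
        pthread_mutex_unlock(&ep_lock);
}

int main(void)
{
        ep_progress();
        puts("progress pass done");
        return 0;
}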
+ */ + ofi_genlock_lock(&ep->util_ep.lock); + if (smr_env.use_dsa_sar) smr_dsa_progress(ep); @@ -1238,5 +1265,7 @@ void smr_ep_progress(struct util_ep *util_ep) /* always drive forward the ipc list since the completion is * independent of any action by the provider */ - ep->smr_progress_ipc_list(ep); + ep->smr_progress_async_list(ep); + + ofi_genlock_unlock(&ep->util_ep.lock); } diff --git a/prov/shm/src/smr_rma.c b/prov/shm/src/smr_rma.c index bc31f30a877..bfddc86b4d0 100644 --- a/prov/shm/src/smr_rma.c +++ b/prov/shm/src/smr_rma.c @@ -41,19 +41,19 @@ static void smr_add_rma_cmd(struct smr_region *peer_smr, memcpy(cmd->rma.rma_iov, rma_iov, sizeof(*rma_iov) * iov_count); } -static void smr_format_rma_resp(struct smr_cmd *cmd, fi_addr_t peer_id, +static void smr_format_rma_resp(struct smr_cmd *cmd, int64_t peer_id, const struct fi_rma_iov *rma_iov, size_t count, size_t total_len, uint32_t op, uint64_t op_flags) { - smr_generic_format(cmd, peer_id, op, 0, 0, op_flags); + smr_generic_format(cmd, 0, peer_id, op, 0, 0, op_flags); cmd->hdr.size = total_len; } static ssize_t smr_rma_fast(struct smr_ep *ep, struct smr_region *peer_smr, const struct iovec *iov, size_t iov_count, const struct fi_rma_iov *rma_iov, size_t rma_count, - void **desc, int peer_id, int id, void *context, + void **desc, int rx_id, int tx_id, void *context, uint32_t op, uint64_t op_flags) { struct iovec vma_iovec[SMR_IOV_LIMIT], rma_iovec[SMR_IOV_LIMIT]; @@ -75,7 +75,7 @@ static ssize_t smr_rma_fast(struct smr_ep *ep, struct smr_region *peer_smr, total_len = ofi_total_iov_len(iov, iov_count); - xpmem = &smr_peer_data(ep->region)[id].xpmem; + xpmem = &smr_peer_data(ep->region)[tx_id].xpmem; ret = ofi_shm_p2p_copy(ep->p2p_type, vma_iovec, iov_count, rma_iovec, rma_count, total_len, peer_smr->pid, @@ -86,11 +86,11 @@ static ssize_t smr_rma_fast(struct smr_ep *ep, struct smr_region *peer_smr, return -FI_EAGAIN; } - smr_format_rma_resp(&ce->cmd, peer_id, rma_iov, rma_count, total_len, + smr_format_rma_resp(&ce->cmd, rx_id, rma_iov, rma_count, total_len, (op == ofi_op_write) ? 
ofi_op_write_async : ofi_op_read_async, op_flags); - ce->ptr = smr_peer_to_peer(ep, id, (uintptr_t) &ce->cmd); + ce->ptr = smr_peer_to_peer(ep, tx_id, (uintptr_t) &ce->cmd); smr_cmd_queue_commit(ce, pos); return FI_SUCCESS; } @@ -103,7 +103,7 @@ static ssize_t smr_generic_rma( { struct smr_domain *domain; struct smr_region *peer_smr; - int64_t id, peer_id; + int64_t tx_id, rx_id; int cmds, err = 0, proto = smr_proto_inline; ssize_t ret = 0; size_t total_len; @@ -119,25 +119,25 @@ static ssize_t smr_generic_rma( domain = container_of(ep->util_ep.domain, struct smr_domain, util_domain); - id = smr_verify_peer(ep, addr); - if (id < 0) + tx_id = smr_verify_peer(ep, addr); + if (tx_id < 0) return -FI_EAGAIN; - peer_id = smr_peer_data(ep->region)[id].id; - peer_smr = smr_peer_region(ep, id); + rx_id = smr_peer_data(ep->region)[tx_id].id; + peer_smr = smr_peer_region(ep, tx_id); cmds = 1 + !(domain->fast_rma && !(op_flags & (FI_REMOTE_CQ_DATA | FI_DELIVERY_COMPLETE)) && rma_count == 1 && smr_vma_enabled(ep, peer_smr)); - if (smr_peer_data(ep->region)[id].sar_status) + if (smr_peer_data(ep->region)[tx_id].sar_status) return -FI_EAGAIN; ofi_genlock_lock(&ep->util_ep.lock); if (cmds == 1) { err = smr_rma_fast(ep, peer_smr, iov, iov_count, rma_iov, - rma_count, desc, peer_id, id, context, op, + rma_count, desc, rx_id, tx_id, context, op, op_flags); if (err) { FI_WARN(&smr_prov, FI_LOG_EP_CTRL, @@ -170,7 +170,7 @@ static ssize_t smr_generic_rma( assert(!(op_flags & FI_INJECT) || total_len <= SMR_INJECT_SIZE); proto = smr_select_proto(desc, iov_count, smr_vma_enabled(ep, peer_smr), - smr_ipc_valid(ep, peer_smr, id, peer_id), op, + smr_ipc_valid(ep, peer_smr, tx_id, rx_id), op, total_len, op_flags); if (proto != smr_proto_inline) { if (smr_freestack_isempty(smr_cmd_stack(ep->region))) { @@ -180,12 +180,12 @@ static ssize_t smr_generic_rma( } cmd = smr_freestack_pop(smr_cmd_stack(ep->region)); assert(cmd); - ce->ptr = smr_local_to_peer(ep, id, peer_id, (uintptr_t) cmd); + ce->ptr = smr_local_to_peer(ep, tx_id, rx_id, (uintptr_t) cmd); } else { cmd = &ce->cmd; - ce->ptr = smr_peer_to_peer(ep, id, (uintptr_t) &ce->cmd); + ce->ptr = smr_peer_to_peer(ep, tx_id, (uintptr_t) &ce->cmd); } - ret = smr_send_ops[proto](ep, peer_smr, id, peer_id, op, 0, data, + ret = smr_send_ops[proto](ep, peer_smr, tx_id, rx_id, op, 0, data, op_flags, (struct ofi_mr **)desc, iov, iov_count, total_len, context, cmd); if (ret) { diff --git a/prov/shm/src/smr_util.c b/prov/shm/src/smr_util.c index dec3962759a..c02fbb3ee04 100644 --- a/prov/shm/src/smr_util.c +++ b/prov/shm/src/smr_util.c @@ -287,7 +287,7 @@ int smr_create(const struct fi_provider *prov, struct smr_map *map, sizeof(struct smr_sar_buf)); for (i = 0; i < SMR_MAX_PEERS; i++) { smr_peer_data(*smr)[i].id = -1; - smr_peer_data(*smr)[i].sar_status = 0; + smr_peer_data(*smr)[i].sar_status = SMR_SAR_FREE; smr_peer_data(*smr)[i].name_sent = 0; smr_peer_data(*smr)[i].xpmem.avail = false; } diff --git a/src/hmem.c b/src/hmem.c index a624f8dddff..7bbd7a6c80a 100644 --- a/src/hmem.c +++ b/src/hmem.c @@ -413,9 +413,9 @@ static ssize_t ofi_dev_reg_copy_hmem_iov_buf(enum fi_hmem_iface hmem_iface, uint return done; } -static ssize_t ofi_copy_mr_iov(struct ofi_mr **mr, const struct iovec *iov, - size_t iov_count, size_t offset, void *buf, - size_t size, int dir) +ssize_t ofi_copy_mr_iov(struct ofi_mr **mr, const struct iovec *iov, + size_t iov_count, size_t offset, void *buf, + size_t size, int dir) { uint64_t done = 0, len; uint64_t hmem_iface, hmem_device, hmem_flags; From 
25075b8d8bceb1be3f364d18979636bc1303c3d7 Mon Sep 17 00:00:00 2001 From: Alexia Ingerson Date: Thu, 9 Jan 2025 08:58:06 -0800 Subject: [PATCH 13/13] prov/shm: bug fixes and cleanups without a category Will move Signed-off-by: Alexia Ingerson --- include/ofi_mem.h | 4 +- prov/shm/src/smr.h | 7 +-- prov/shm/src/smr_atomic.c | 9 +++- prov/shm/src/smr_ep.c | 89 ++++++++++++++----------------- prov/shm/src/smr_msg.c | 8 +-- prov/shm/src/smr_progress.c | 10 ++-- prov/shm/src/smr_rma.c | 101 ++++++++++++++++-------------------- prov/shm/src/smr_util.c | 2 +- 8 files changed, 104 insertions(+), 126 deletions(-) diff --git a/include/ofi_mem.h b/include/ofi_mem.h index d48747c10a7..3c9b00b191a 100644 --- a/include/ofi_mem.h +++ b/include/ofi_mem.h @@ -263,8 +263,8 @@ static inline void smr_freestack_push_by_offset(struct smr_freestack *fs, static inline int16_t smr_freestack_get_index(struct smr_freestack *fs, char *local_p) { - return (int16_t) (local_p - (char*) fs - fs->entry_base_offset) / - fs->object_size; + return (local_p - ((char*) fs + fs->entry_base_offset)) / + fs->object_size; } /* Push by object */ diff --git a/prov/shm/src/smr.h b/prov/shm/src/smr.h index abd559e7753..0f72b36be3f 100644 --- a/prov/shm/src/smr.h +++ b/prov/shm/src/smr.h @@ -585,9 +585,10 @@ int smr_cntr_open(struct fid_domain *domain, struct fi_cntr_attr *attr, int64_t smr_verify_peer(struct smr_ep *ep, fi_addr_t fi_addr); -void smr_format_tx_pend(struct smr_pend_entry *pend, void *context, - struct ofi_mr **mr, const struct iovec *iov, - uint32_t iov_count, uint64_t op_flags); +void smr_format_tx_pend(struct smr_pend_entry *pend, struct smr_cmd *cmd, + void *context, struct ofi_mr **mr, + const struct iovec *iov, uint32_t iov_count, + uint64_t op_flags); void smr_generic_format(struct smr_cmd *cmd, int64_t tx_id, int64_t rx_id, uint32_t op, uint64_t tag, uint64_t data, uint64_t op_flags); diff --git a/prov/shm/src/smr_atomic.c b/prov/shm/src/smr_atomic.c index 188f96bbca4..ff0236a6f1c 100644 --- a/prov/shm/src/smr_atomic.c +++ b/prov/shm/src/smr_atomic.c @@ -136,7 +136,7 @@ static ssize_t smr_do_atomic_inject( pend = ofi_buf_alloc(ep->pend_pool); assert(pend); cmd->hdr.tx_ctx = (uintptr_t) pend; - smr_format_tx_pend(pend, context, res_desc, resultv, + smr_format_tx_pend(pend, cmd, context, res_desc, resultv, result_count, op_flags); } else { cmd->hdr.tx_ctx = 0; @@ -345,6 +345,7 @@ static ssize_t smr_atomic_inject(struct fid_ep *ep_fid, const void *buf, int64_t id, peer_id, pos; ssize_t ret = -FI_EAGAIN; size_t total_len; + int proto; ep = container_of(ep_fid, struct smr_ep, util_ep.ep_fid.fid); @@ -379,12 +380,14 @@ static ssize_t smr_atomic_inject(struct fid_ep *ep_fid, const void *buf, rma_ioc.key = key; if (total_len <= SMR_MSG_DATA_LEN) { + proto = smr_proto_inline; cmd = &ce->cmd; ce->ptr = smr_peer_to_peer(ep, id, (uintptr_t) cmd); smr_do_atomic_inline(ep, peer_smr, id, peer_id, ofi_op_atomic, 0, datatype, op, NULL, &iov, 1, total_len, &ce->cmd); } else { + proto = smr_proto_inject; if (smr_freestack_isempty(smr_cmd_stack(ep->region))) { smr_cmd_queue_discard(ce, pos); ret = -FI_EAGAIN; @@ -406,7 +409,9 @@ static ssize_t smr_atomic_inject(struct fid_ep *ep_fid, const void *buf, smr_format_rma_ioc(cmd, &rma_ioc, 1); smr_cmd_queue_commit(ce, pos); - ofi_ep_peer_tx_cntr_inc(&ep->util_ep, ofi_op_atomic); + + if (proto == smr_proto_inline) + ofi_ep_peer_tx_cntr_inc(&ep->util_ep, ofi_op_atomic); unlock: ofi_genlock_unlock(&ep->util_ep.lock); return ret; diff --git a/prov/shm/src/smr_ep.c b/prov/shm/src/smr_ep.c index 
93fc7d696d2..b4668c85d6f 100644 --- a/prov/shm/src/smr_ep.c +++ b/prov/shm/src/smr_ep.c @@ -214,11 +214,13 @@ int64_t smr_verify_peer(struct smr_ep *ep, fi_addr_t fi_addr) return -1; } -void smr_format_tx_pend(struct smr_pend_entry *pend, void *context, - struct ofi_mr **mr, const struct iovec *iov, - uint32_t iov_count, uint64_t op_flags) +void smr_format_tx_pend(struct smr_pend_entry *pend, struct smr_cmd *cmd, + void *context, struct ofi_mr **mr, + const struct iovec *iov, uint32_t iov_count, + uint64_t op_flags) { pend->type = SMR_TX_ENTRY; + pend->cmd = cmd; pend->comp_ctx = context; pend->comp_flags = op_flags; @@ -259,30 +261,33 @@ static void smr_format_inline(struct smr_cmd *cmd, struct ofi_mr **mr, mr, iov, count, 0); } -static void smr_format_inject(struct smr_cmd *cmd, struct ofi_mr **mr, - const struct iovec *iov, size_t count, - struct smr_region *smr) + +static void smr_format_inject(struct smr_ep *ep, struct smr_cmd *cmd, + struct smr_pend_entry *pend) { struct smr_inject_buf *tx_buf; - tx_buf = smr_get_inject_buf(smr, cmd); + tx_buf = smr_get_inject_buf(ep->region, cmd); cmd->hdr.proto = smr_proto_inject; - if (cmd->hdr.op != ofi_op_read_req) + if (cmd->hdr.op != ofi_op_read_req) { cmd->hdr.size = ofi_copy_from_mr_iov(tx_buf->data, SMR_INJECT_SIZE, - mr, iov, count, 0); - else - cmd->hdr.size = ofi_total_iov_len(iov, count); + pend->mr, pend->iov, + pend->iov_count, 0); + pend->bytes_done = cmd->hdr.size; + } else { + cmd->hdr.size = ofi_total_iov_len(pend->iov, pend->iov_count); + pend->bytes_done = 0; + } } -static void smr_format_iov(struct smr_cmd *cmd, const struct iovec *iov, - size_t count, size_t total_len, struct smr_region *smr) +static void smr_format_iov(struct smr_cmd *cmd, struct smr_pend_entry *pend) { cmd->hdr.proto = smr_proto_iov; - cmd->data.iov_count = count; - cmd->hdr.size = total_len; - memcpy(cmd->data.iov, iov, sizeof(*iov) * count); + cmd->data.iov_count = pend->iov_count; + cmd->hdr.size = ofi_total_iov_len(pend->iov, pend->iov_count); + memcpy(cmd->data.iov, pend->iov, sizeof(*pend->iov) * pend->iov_count); } static int smr_format_ipc(struct smr_cmd *cmd, void *ptr, size_t len, @@ -367,8 +372,8 @@ static int smr_format_sar(struct smr_ep *ep, struct smr_cmd *cmd, //probably remove this - we should never have a 0 byte SAR now that //injects can be delivery complete /* Nothing to copy for 0 byte transfer */ - // if (!cmd->hdr.size) - // goto out; + // if (!cmd->hdr.size) + // goto out; if (cmd->hdr.op != ofi_op_read_req) { ret = pend->sar_copy_fn(ep, pend); @@ -427,10 +432,9 @@ int smr_select_proto(void **desc, size_t iov_count, bool vma_avail, return total_len <= SMR_MSG_DATA_LEN ? smr_proto_inline : smr_proto_inject; - if (op_flags & FI_INJECT) { - assert(total_len <= SMR_INJECT_SIZE); + if (op_flags & FI_INJECT || total_len <= SMR_INJECT_SIZE) { if (op_flags & FI_DELIVERY_COMPLETE) - return smr_proto_sar; + return smr_proto_inject; return total_len <= SMR_MSG_DATA_LEN ? smr_proto_inline : smr_proto_inject; } @@ -438,20 +442,7 @@ int smr_select_proto(void **desc, size_t iov_count, bool vma_avail, if (use_ipc) return smr_proto_ipc; - if (total_len > SMR_INJECT_SIZE) - return vma_avail ? smr_proto_iov: smr_proto_sar; - - if (op_flags & FI_DELIVERY_COMPLETE) - return smr_proto_sar; - - if (total_len <= SMR_MSG_DATA_LEN) - return smr_proto_inline; - - if (total_len <= SMR_INJECT_SIZE) - return smr_proto_inject; - - return smr_proto_sar; - + return vma_avail ? 
smr_proto_iov: smr_proto_sar; } static ssize_t smr_do_inline(struct smr_ep *ep, struct smr_region *peer_smr, @@ -477,19 +468,14 @@ static ssize_t smr_do_inject(struct smr_ep *ep, struct smr_region *peer_smr, { struct smr_pend_entry *pend; - if (op == ofi_op_read_req) { - pend = ofi_buf_alloc(ep->pend_pool); - assert(pend); + pend = ofi_buf_alloc(ep->pend_pool); + assert(pend); - cmd->hdr.tx_ctx = (uintptr_t) pend; - smr_format_tx_pend(pend, context, desc, iov, iov_count, - op_flags); - } else { - cmd->hdr.tx_ctx = 0; - } + cmd->hdr.tx_ctx = (uintptr_t) pend; + smr_format_tx_pend(pend, cmd, context, desc, iov, iov_count, op_flags); smr_generic_format(cmd, tx_id, rx_id, op, tag, data, op_flags); - smr_format_inject(cmd, desc, iov, iov_count, ep->region); + smr_format_inject(ep, cmd, pend); return FI_SUCCESS; } @@ -507,10 +493,10 @@ static ssize_t smr_do_iov(struct smr_ep *ep, struct smr_region *peer_smr, assert(pend); cmd->hdr.tx_ctx = (uintptr_t) pend; - smr_generic_format(cmd, tx_id, rx_id, op, tag, data, op_flags); - smr_format_iov(cmd, iov, iov_count, total_len, ep->region); + smr_format_tx_pend(pend, cmd, context, desc, iov, iov_count, op_flags); - smr_format_tx_pend(pend, context, desc, iov, iov_count, op_flags); + smr_generic_format(cmd, tx_id, rx_id, op, tag, data, op_flags); + smr_format_iov(cmd, pend); return FI_SUCCESS; } @@ -529,8 +515,9 @@ static ssize_t smr_do_sar(struct smr_ep *ep, struct smr_region *peer_smr, assert(pend); cmd->hdr.tx_ctx = (uintptr_t) pend; - smr_format_tx_pend(pend, context, desc, iov, iov_count, op_flags); - pend->sar_dir = pend->cmd->hdr.op == ofi_op_read_req ? + smr_format_tx_pend(pend, cmd, context, desc, iov, iov_count, op_flags); + + pend->sar_dir = op == ofi_op_read_req ? OFI_COPY_BUF_TO_IOV : OFI_COPY_IOV_TO_BUF; if (smr_env.use_dsa_sar && ofi_mr_all_host(pend->mr, pend->iov_count)) @@ -576,7 +563,7 @@ static ssize_t smr_do_ipc(struct smr_ep *ep, struct smr_region *peer_smr, total_len, context, cmd); } - smr_format_tx_pend(pend, context, desc, iov, iov_count, op_flags); + smr_format_tx_pend(pend, cmd, context, desc, iov, iov_count, op_flags); return FI_SUCCESS; } diff --git a/prov/shm/src/smr_msg.c b/prov/shm/src/smr_msg.c index 3581a2ab752..5aafa73907c 100644 --- a/prov/shm/src/smr_msg.c +++ b/prov/shm/src/smr_msg.c @@ -137,7 +137,7 @@ static ssize_t smr_generic_sendmsg(struct smr_ep *ep, const struct iovec *iov, } smr_cmd_queue_commit(ce, pos); - if (proto != smr_proto_inline && proto != smr_proto_inject) + if (proto != smr_proto_inline) goto unlock; ret = smr_complete_tx(ep, context, op, op_flags); @@ -230,8 +230,6 @@ static ssize_t smr_generic_inject(struct fid_ep *ep_fid, const void *buf, goto unlock; } - ce->ptr = smr_peer_to_peer(ep, tx_id, (uintptr_t) &ce->cmd); - if (len <= SMR_MSG_DATA_LEN) { proto = smr_proto_inline; cmd = &ce->cmd; @@ -257,7 +255,9 @@ static ssize_t smr_generic_inject(struct fid_ep *ep_fid, const void *buf, goto unlock; } smr_cmd_queue_commit(ce, pos); - ofi_ep_peer_tx_cntr_inc(&ep->util_ep, op); + + if (proto == smr_proto_inline) + ofi_ep_peer_tx_cntr_inc(&ep->util_ep, op); unlock: ofi_genlock_unlock(&ep->util_ep.lock); diff --git a/prov/shm/src/smr_progress.c b/prov/shm/src/smr_progress.c index d82936d81f0..752a96ad51b 100644 --- a/prov/shm/src/smr_progress.c +++ b/prov/shm/src/smr_progress.c @@ -137,7 +137,7 @@ static int smr_progress_return_entry(struct smr_ep *ep, struct smr_cmd *cmd, pending->mr, pending->iov, pending->iov_count, - 0, src,cmd->hdr.size); + 0, src, cmd->hdr.size); if (hmem_copy_ret < 0) { 
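smr_select_proto() now reduces to the short ladder that closes above: small or FI_INJECT transfers stay inline/inject (inject when FI_DELIVERY_COMPLETE is requested, since injects can now carry delivery-complete semantics), device memory prefers IPC, and everything else picks IOV when VMA copies are available or falls back to SAR. A compilable condensation of that ladder; the flag bits and size constants are invented stand-ins for the SMR_* values:

#include <stdbool.h>
#include <stdio.h>

enum proto { PROTO_INLINE, PROTO_INJECT, PROTO_IPC, PROTO_IOV, PROTO_SAR };

#define MSG_DATA_LEN    64U     /* stand-in for SMR_MSG_DATA_LEN */
#define INJECT_SIZE     4096U   /* stand-in for SMR_INJECT_SIZE */
#define F_INJECT                0x1U
#define F_DELIVERY_COMPLETE     0x2U

/* Condensed decision ladder: small payloads go inline/inject, IPC wins
 * for device memory, and large host transfers pick IOV when VMA copies
 * (CMA/xpmem) are available, else fall back to SAR. */
static enum proto select_proto(size_t total_len, unsigned flags,
                               bool use_ipc, bool vma_avail)
{
        if ((flags & F_INJECT) || total_len <= INJECT_SIZE) {
                if (flags & F_DELIVERY_COMPLETE)
                        return PROTO_INJECT;
                return total_len <= MSG_DATA_LEN ? PROTO_INLINE
                                                 : PROTO_INJECT;
        }
        if (use_ipc)
                return PROTO_IPC;
        return vma_avail ? PROTO_IOV : PROTO_SAR;
}

int main(void)
{
        printf("%d %d %d %d\n",
               select_proto(32, 0, false, true),        /* inline */
               select_proto(2048, 0, false, true),      /* inject */
               select_proto(1 << 20, 0, true, true),    /* ipc    */
               select_proto(1 << 20, 0, false, false)); /* sar    */
        return 0;
}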
FI_WARN(&smr_prov, FI_LOG_EP_CTRL, @@ -434,10 +434,6 @@ ssize_t smr_progress_sar(struct smr_ep *ep, struct smr_cmd *cmd, struct iovec sar_iov[SMR_IOV_LIMIT]; ssize_t ret = FI_SUCCESS; - /* Nothing to do for 0 byte transfer */ - if (!cmd->hdr.size) - goto out; - memcpy(sar_iov, iov, sizeof(*iov) * iov_count); (void) ofi_truncate_iov(sar_iov, &iov_count, cmd->hdr.size); @@ -458,7 +454,7 @@ ssize_t smr_progress_sar(struct smr_ep *ep, struct smr_cmd *cmd, cmd->hdr.rx_ctx = 0; ofi_buf_free(pend); } -out: + smr_return_cmd(ep, cmd); return ret; } @@ -838,7 +834,7 @@ static void smr_progress_connreq(struct smr_ep *ep, struct smr_cmd *cmd) } smr_set_ipc_valid(ep, idx); - smr_peer_data(peer_smr)[cmd->hdr.rx_id].id = idx; + smr_peer_data(peer_smr)[cmd->hdr.tx_id].id = idx; smr_peer_data(ep->region)[idx].id = cmd->hdr.rx_id; assert(ep->map->num_peers > 0); diff --git a/prov/shm/src/smr_rma.c b/prov/shm/src/smr_rma.c index bfddc86b4d0..16f96c1ce73 100644 --- a/prov/shm/src/smr_rma.c +++ b/prov/shm/src/smr_rma.c @@ -82,6 +82,9 @@ static ssize_t smr_rma_fast(struct smr_ep *ep, struct smr_region *peer_smr, op == ofi_op_write, xpmem); if (ret) { + FI_WARN(&smr_prov, FI_LOG_EP_CTRL, "error doing fast RMA\n"); + ret = smr_write_err_comp(ep->util_ep.rx_cq, NULL, op_flags, 0, + ret); smr_cmd_queue_discard(ce, pos); return -FI_EAGAIN; } @@ -92,19 +95,41 @@ static ssize_t smr_rma_fast(struct smr_ep *ep, struct smr_region *peer_smr, ce->ptr = smr_peer_to_peer(ep, tx_id, (uintptr_t) &ce->cmd); smr_cmd_queue_commit(ce, pos); + + ret = smr_complete_tx(ep, context, op, op_flags); + if (ret) { + FI_WARN(&smr_prov, FI_LOG_EP_CTRL, + "unable to process tx completion\n"); + } + return FI_SUCCESS; } +static inline bool smr_do_fast_rma(struct smr_ep *ep, uint32_t op_flags, + size_t rma_count, size_t total_len, + struct smr_region *peer_smr) +{ + struct smr_domain *domain; + + domain = container_of(ep->util_ep.domain, struct smr_domain, + util_domain); + + return domain->fast_rma && !(op_flags & + (FI_REMOTE_CQ_DATA | FI_DELIVERY_COMPLETE)) && + rma_count == 1 && smr_vma_enabled(ep, peer_smr) && + total_len > SMR_INJECT_SIZE; + +} + static ssize_t smr_generic_rma( struct smr_ep *ep, const struct iovec *iov, size_t iov_count, const struct fi_rma_iov *rma_iov, size_t rma_count, void **desc, fi_addr_t addr, void *context, uint32_t op, uint64_t data, uint64_t op_flags) { - struct smr_domain *domain; struct smr_region *peer_smr; int64_t tx_id, rx_id; - int cmds, err = 0, proto = smr_proto_inline; + int proto = smr_proto_inline; ssize_t ret = 0; size_t total_len; struct smr_cmd_entry *ce; @@ -116,9 +141,6 @@ static ssize_t smr_generic_rma( assert(ofi_total_iov_len(iov, iov_count) == ofi_total_rma_iov_len(rma_iov, rma_count)); - domain = container_of(ep->util_ep.domain, struct smr_domain, - util_domain); - tx_id = smr_verify_peer(ep, addr); if (tx_id < 0) return -FI_EAGAIN; @@ -126,37 +148,17 @@ static ssize_t smr_generic_rma( rx_id = smr_peer_data(ep->region)[tx_id].id; peer_smr = smr_peer_region(ep, tx_id); - cmds = 1 + !(domain->fast_rma && !(op_flags & - (FI_REMOTE_CQ_DATA | FI_DELIVERY_COMPLETE)) && - rma_count == 1 && smr_vma_enabled(ep, peer_smr)); - if (smr_peer_data(ep->region)[tx_id].sar_status) return -FI_EAGAIN; ofi_genlock_lock(&ep->util_ep.lock); - if (cmds == 1) { - err = smr_rma_fast(ep, peer_smr, iov, iov_count, rma_iov, + total_len = ofi_total_iov_len(iov, iov_count); + //TODO move this to proto + if (smr_do_fast_rma(ep, op_flags, rma_count, total_len, peer_smr)) { + ret = smr_rma_fast(ep, peer_smr, iov, 
iov_count, rma_iov, rma_count, desc, rx_id, tx_id, context, op, op_flags); - if (err) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "error doing fast RMA\n"); - if (err == -FI_EAGAIN) { - ret = -FI_EAGAIN; - goto unlock; - } - - ret = smr_write_err_comp(ep->util_ep.rx_cq, NULL, - op_flags, 0, -err); - } else { - ret = smr_complete_tx(ep, context, op, op_flags); - } - - if (ret) { - FI_WARN(&smr_prov, FI_LOG_EP_CTRL, - "unable to process tx completion\n"); - } goto unlock; } @@ -166,7 +168,6 @@ static ssize_t smr_generic_rma( goto unlock; } - total_len = ofi_total_iov_len(iov, iov_count); assert(!(op_flags & FI_INJECT) || total_len <= SMR_INJECT_SIZE); proto = smr_select_proto(desc, iov_count, smr_vma_enabled(ep, peer_smr), @@ -198,8 +199,7 @@ static ssize_t smr_generic_rma( smr_add_rma_cmd(peer_smr, rma_iov, rma_count, cmd); smr_cmd_queue_commit(ce, pos); - if ((proto != smr_proto_inline && proto != smr_proto_inject) || - (op == ofi_op_read_req)) + if (proto != smr_proto_inline || op == ofi_op_read_req) goto unlock; ret = smr_complete_tx(ep, context, op, op_flags); @@ -325,12 +325,11 @@ static ssize_t smr_generic_rma_inject(struct fid_ep *ep_fid, const void *buf, uint64_t data, uint64_t flags) { struct smr_ep *ep; - struct smr_domain *domain; struct smr_region *peer_smr; struct iovec iov; struct fi_rma_iov rma_iov; - int64_t id, peer_id; - int cmds, proto = smr_proto_inline; + int64_t tx_id, rx_id; + int proto = smr_proto_inline; ssize_t ret = 0; struct smr_cmd *cmd; struct smr_cmd_entry *ce; @@ -338,20 +337,15 @@ static ssize_t smr_generic_rma_inject(struct fid_ep *ep_fid, const void *buf, assert(len <= SMR_INJECT_SIZE); ep = container_of(ep_fid, struct smr_ep, util_ep.ep_fid.fid); - domain = container_of(ep->util_ep.domain, struct smr_domain, - util_domain); - id = smr_verify_peer(ep, dest_addr); - if (id < 0) + tx_id = smr_verify_peer(ep, dest_addr); + if (tx_id < 0) return -FI_EAGAIN; - peer_id = smr_peer_data(ep->region)[id].id; - peer_smr = smr_peer_region(ep, id); - - cmds = 1 + !(domain->fast_rma && !(flags & FI_REMOTE_CQ_DATA) && - smr_vma_enabled(ep, peer_smr)); + rx_id = smr_peer_data(ep->region)[tx_id].id; + peer_smr = smr_peer_region(ep, tx_id); - if (smr_peer_data(ep->region)[id].sar_status) + if (smr_peer_data(ep->region)[tx_id].sar_status) return -FI_EAGAIN; iov.iov_base = (void *) buf; @@ -362,12 +356,6 @@ static ssize_t smr_generic_rma_inject(struct fid_ep *ep_fid, const void *buf, ofi_genlock_lock(&ep->util_ep.lock); - if (cmds == 1) { - ret = smr_rma_fast(ep, peer_smr, &iov, 1, &rma_iov, 1, NULL, - peer_id, id, NULL, ofi_op_write, flags); - goto out; - } - ret = smr_cmd_queue_next(smr_cmd_queue(peer_smr), &ce, &pos); if (ret == -FI_ENOENT) return -FI_EAGAIN; @@ -375,7 +363,7 @@ static ssize_t smr_generic_rma_inject(struct fid_ep *ep_fid, const void *buf, if (len <= SMR_MSG_DATA_LEN) { proto = smr_proto_inline; cmd = &ce->cmd; - ce->ptr = smr_peer_to_peer(ep, id, (uintptr_t) &ce->cmd); + ce->ptr = smr_peer_to_peer(ep, tx_id, (uintptr_t) &ce->cmd); } else { proto = smr_proto_inject; if (smr_freestack_isempty(smr_cmd_stack(ep->region))) { @@ -386,20 +374,21 @@ static ssize_t smr_generic_rma_inject(struct fid_ep *ep_fid, const void *buf, cmd = smr_freestack_pop(smr_cmd_stack(ep->region)); assert(cmd); - ce->ptr = smr_local_to_peer(ep, id, peer_id, (uintptr_t) cmd); + ce->ptr = smr_local_to_peer(ep, tx_id, rx_id, (uintptr_t) cmd); } - ret = smr_send_ops[proto](ep, peer_smr, id, peer_id, ofi_op_write, 0, + ret = smr_send_ops[proto](ep, peer_smr, tx_id, rx_id, ofi_op_write, 0, 
data, flags, NULL, &iov, 1, len, NULL, cmd); if (ret) { + if (proto != smr_proto_inline) + smr_freestack_push(smr_cmd_stack(ep->region), cmd); smr_cmd_queue_discard(ce, pos); goto unlock; } smr_add_rma_cmd(peer_smr, &rma_iov, 1, cmd); smr_cmd_queue_commit(ce, pos); -out: - if (!ret) + if (proto == smr_proto_inline) ofi_ep_peer_tx_cntr_inc(&ep->util_ep, ofi_op_write); unlock: ofi_genlock_unlock(&ep->util_ep.lock); diff --git a/prov/shm/src/smr_util.c b/prov/shm/src/smr_util.c index c02fbb3ee04..b1b345c1613 100644 --- a/prov/shm/src/smr_util.c +++ b/prov/shm/src/smr_util.c @@ -282,7 +282,7 @@ int smr_create(const struct fi_provider *prov, struct smr_map *map, smr_return_queue_init(smr_return_queue(*smr), tx_size); smr_freestack_init(smr_cmd_stack(*smr), tx_size, - sizeof(struct smr_cmd_entry)); + sizeof(struct smr_cmd)); smr_freestack_init(smr_sar_pool(*smr), SMR_MAX_PEERS, sizeof(struct smr_sar_buf)); for (i = 0; i < SMR_MAX_PEERS; i++) {
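The series ends with two freestack corrections worth spelling out: smr_freestack_get_index() in the ofi_mem.h hunk earlier in this patch now subtracts the pool base before dividing (the old code cast the raw byte difference to int16_t first, truncating any offset past INT16_MAX), and smr_create() sizes the command stack by sizeof(struct smr_cmd), the element actually stored in it, instead of sizeof(struct smr_cmd_entry). A stand-alone check of the corrected index math; struct cmd here is an illustrative stand-in:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct cmd { char payload[128]; };      /* illustrative element type */

struct freestack {
        uint32_t entry_base_offset;     /* bytes from header to first entry */
        uint32_t object_size;           /* sizeof the stored element */
};

/* Corrected order of operations: locate the pool base, take the byte
 * offset, divide, and only then narrow. Casting the byte difference to
 * int16_t before the division truncates once it exceeds INT16_MAX. */
static int16_t fs_index(struct freestack *fs, char *p)
{
        return (int16_t) ((p - ((char *) fs + fs->entry_base_offset)) /
                          fs->object_size);
}

static struct pool {
        struct freestack fs;
        struct cmd entries[512];
} pool;

int main(void)
{
        pool.fs.entry_base_offset = offsetof(struct pool, entries);
        pool.fs.object_size = sizeof(struct cmd); /* element, not wrapper */

        /* 300 * 128 = 38400 bytes: beyond INT16_MAX, so the pre-division
         * cast in the old code would have corrupted this index. */
        assert(fs_index(&pool.fs, (char *) &pool.entries[300]) == 300);
        puts("freestack index ok");
        return 0;
}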