diff --git a/src/ucs/memory/rcache.c b/src/ucs/memory/rcache.c
index fde5352a4d3..7cf2ae8c50a 100644
--- a/src/ucs/memory/rcache.c
+++ b/src/ucs/memory/rcache.c
@@ -188,7 +188,7 @@ static void ucs_rcache_region_collect_callback(const ucs_pgtable_t *pgtable,
 {
     ucs_rcache_region_t *region = ucs_derived_of(pgt_region, ucs_rcache_region_t);
     ucs_list_link_t *list       = arg;
-    ucs_list_add_tail(list, &region->list);
+    ucs_list_add_tail(list, &region->tmp_list);
 }
 
 /* Lock must be held */
@@ -200,6 +200,52 @@ static void ucs_rcache_find_regions(ucs_rcache_t *rcache, ucs_pgt_addr_t from,
                             ucs_rcache_region_collect_callback, list);
 }
 
+/* LRU spinlock must be held */
+static inline void
+ucs_rcache_region_lru_add(ucs_rcache_t *rcache, ucs_rcache_region_t *region)
+{
+    if (region->lru_flag) {
+        return;
+    }
+
+    ucs_rcache_region_trace(rcache, region, "lru add");
+    ucs_list_add_tail(&rcache->lru.list, &region->lru_list);
+    ++rcache->lru.count;
+    region->lru_flag = 1;
+}
+
+/* LRU spinlock must be held */
+static inline void
+ucs_rcache_region_lru_remove(ucs_rcache_t *rcache, ucs_rcache_region_t *region)
+{
+    if (!region->lru_flag) {
+        return;
+    }
+
+    ucs_rcache_region_trace(rcache, region, "lru remove");
+    ucs_list_del(&region->lru_list);
+    --rcache->lru.count;
+    region->lru_flag = 0;
+}
+
+static void ucs_rcache_region_lru_get(ucs_rcache_t *rcache,
+                                      ucs_rcache_region_t *region)
+{
+    /* A used region cannot be evicted */
+    ucs_spin_lock(&rcache->lru.lock);
+    ucs_rcache_region_lru_remove(rcache, region);
+    ucs_spin_unlock(&rcache->lru.lock);
+}
+
+static void ucs_rcache_region_lru_put(ucs_rcache_t *rcache,
+                                      ucs_rcache_region_t *region)
+{
+    /* When we finish using a region, it's a candidate for LRU eviction */
+    ucs_spin_lock(&rcache->lru.lock);
+    ucs_rcache_region_lru_add(rcache, region);
+    ucs_spin_unlock(&rcache->lru.lock);
+}
+
 /* Lock must be held in write mode */
 static void ucs_mem_region_destroy_internal(ucs_rcache_t *rcache,
                                             ucs_rcache_region_t *region)
@@ -216,6 +262,13 @@ static void ucs_mem_region_destroy_internal(ucs_rcache_t *rcache,
         }
     }
 
+    ucs_spin_lock(&rcache->lru.lock);
+    ucs_rcache_region_lru_remove(rcache, region);
+    ucs_spin_unlock(&rcache->lru.lock);
+
+    --rcache->num_regions;
+    rcache->total_size -= region->super.end - region->super.start;
+
     ucs_free(region);
 }
 
@@ -275,7 +328,7 @@ static void ucs_rcache_invalidate_range(ucs_rcache_t *rcache, ucs_pgt_addr_t sta
     ucs_trace_func("rcache=%s, start=0x%lx, end=0x%lx", rcache->name, start, end);
 
     ucs_rcache_find_regions(rcache, start, end - 1, &region_list);
-    ucs_list_for_each_safe(region, tmp, &region_list, list) {
+    ucs_list_for_each_safe(region, tmp, &region_list, tmp_list) {
         /* all regions on the list are in the page table */
         ucs_rcache_region_invalidate(rcache, region, 1, 0);
         UCS_STATS_UPDATE_COUNTER(rcache->stats, UCS_RCACHE_UNMAP_INVALIDATES, 1);
@@ -360,7 +413,7 @@ static void ucs_rcache_purge(ucs_rcache_t *rcache)
     ucs_list_head_init(&region_list);
     ucs_pgtable_purge(&rcache->pgtable, ucs_rcache_region_collect_callback,
                       &region_list);
-    ucs_list_for_each_safe(region, tmp, &region_list, list) {
+    ucs_list_for_each_safe(region, tmp, &region_list, tmp_list) {
         if (region->flags & UCS_RCACHE_REGION_FLAG_PGTABLE) {
             region->flags &= ~UCS_RCACHE_REGION_FLAG_PGTABLE;
             ucs_atomic_add32(&region->refcount, (uint32_t)-1);
@@ -372,6 +425,51 @@ static void ucs_rcache_purge(ucs_rcache_t *rcache)
     }
 }
 
+/* Lock must be held in write mode */
+static void ucs_rcache_lru_evict(ucs_rcache_t *rcache)
+{
+    int num_evicted, num_skipped;
+    ucs_rcache_region_t *region;
+
+    num_evicted = 0;
+    num_skipped = 0;
+
+    ucs_spin_lock(&rcache->lru.lock);
+    while (!ucs_list_is_empty(&rcache->lru.list) &&
+           ((rcache->num_regions > rcache->params.max_regions) ||
+            (rcache->total_size > rcache->params.max_size))) {
+
+        region = ucs_list_head(&rcache->lru.list, ucs_rcache_region_t, lru_list);
+        ucs_assert(region->lru_flag);
+
+        if (!(region->flags & UCS_RCACHE_REGION_FLAG_PGTABLE) ||
+            (region->refcount > 1)) {
+            /* region is in use or not in page table - remove from lru */
+            ucs_rcache_region_lru_remove(rcache, region);
+            ++num_skipped;
+            continue;
+        }
+
+        ucs_spin_unlock(&rcache->lru.lock);
+
+        /* we expect the region to have refcount==1 and to be present in the
+         * page table, so it will be destroyed immediately by this call
+         */
+        ucs_rcache_region_trace(rcache, region, "evict");
+        ucs_rcache_region_invalidate(rcache, region, 1, 1);
+        ++num_evicted;
+
+        ucs_spin_lock(&rcache->lru.lock);
+    }
+    ucs_spin_unlock(&rcache->lru.lock);
+
+    if (num_evicted > 0) {
+        ucs_debug("evicted %d regions, skipped %d regions, usage: %lu (%lu)",
+                  num_evicted, num_skipped, rcache->num_regions,
+                  rcache->params.max_regions);
+    }
+}
+
 static inline int ucs_rcache_region_test(ucs_rcache_region_t *region, int prot)
 {
     return (region->flags & UCS_RCACHE_REGION_FLAG_REGISTERED) &&
@@ -397,7 +495,7 @@ ucs_rcache_check_overlap(ucs_rcache_t *rcache, ucs_pgt_addr_t *start,
 
     /* TODO check if any of the regions is locked */
 
-    ucs_list_for_each_safe(region, tmp, &region_list, list) {
+    ucs_list_for_each_safe(region, tmp, &region_list, tmp_list) {
         if ((*start >= region->super.start) &&
             (*end <= region->super.end) &&
             ucs_rcache_region_test(region, *prot))
@@ -535,12 +633,19 @@ ucs_rcache_create_region(ucs_rcache_t *rcache, void *address, size_t length,
 
     region->prot     = prot;
     region->flags    = UCS_RCACHE_REGION_FLAG_PGTABLE;
+    region->lru_flag = 0;
     region->refcount = 1;
+    region->status   = UCS_INPROGRESS;
+
+    ++rcache->num_regions;
+    rcache->total_size += region->super.end - region->super.start;
+
     region->status = status =
         UCS_PROFILE_NAMED_CALL("mem_reg", rcache->params.ops->mem_reg,
                                rcache->params.context, rcache, arg, region,
                                merged ? UCS_RCACHE_MEM_REG_HIDE_ERRORS : 0);
     if (status != UCS_OK) {
+        ucs_rcache_region_invalidate(rcache, region, 1, 1);
         if (merged) {
             /* failure may be due to merge, because memory of the merged
              * regions has different access permission.
@@ -550,7 +655,6 @@ ucs_rcache_create_region(ucs_rcache_t *rcache, void *address, size_t length,
              */
             ucs_debug("failed to register merged region " UCS_PGT_REGION_FMT ": %s, retrying",
                       UCS_PGT_REGION_ARG(&region->super), ucs_status_string(status));
-            ucs_rcache_region_invalidate(rcache, region, 1, 1);
             goto retry;
         } else {
             ucs_debug("failed to register region " UCS_PGT_REGION_FMT ": %s",
@@ -562,6 +666,8 @@ ucs_rcache_create_region(ucs_rcache_t *rcache, void *address, size_t length,
     region->flags   |= UCS_RCACHE_REGION_FLAG_REGISTERED;
     region->refcount = 2; /* Page-table + user */
 
+    ucs_rcache_lru_evict(rcache);
+
     if (ucs_global_opts.rcache_check_pfn) {
         ucs_rcache_region_pfn(region) = ucs_sys_get_pfn(region->super.start);
     } else {
@@ -607,6 +713,7 @@ ucs_status_t ucs_rcache_get(ucs_rcache_t *rcache, void *address, size_t length,
         {
             ucs_rcache_region_hold(rcache, region);
             ucs_rcache_region_validate_pfn(rcache, region);
+            ucs_rcache_region_lru_get(rcache, region);
             *region_p = region;
             UCS_STATS_UPDATE_COUNTER(rcache->stats, UCS_RCACHE_HITS_FAST, 1);
             pthread_rwlock_unlock(&rcache->lock);
@@ -627,6 +734,7 @@ ucs_status_t ucs_rcache_get(ucs_rcache_t *rcache, void *address, size_t length,
 
 void ucs_rcache_region_put(ucs_rcache_t *rcache, ucs_rcache_region_t *region)
 {
+    ucs_rcache_region_lru_put(rcache, region);
     ucs_rcache_region_put_internal(rcache, region, 1, 0);
     UCS_STATS_UPDATE_COUNTER(rcache->stats, UCS_RCACHE_PUTS, 1);
 }
@@ -692,6 +800,11 @@ static UCS_CLASS_INIT_FUNC(ucs_rcache_t, const ucs_rcache_params_t *params,
     }
 
     ucs_queue_head_init(&self->inv_q);
+    self->lru.count   = 0;
+    self->num_regions = 0;
+    self->total_size  = 0;
+    ucs_list_head_init(&self->lru.list);
+    ucs_spinlock_init(&self->lru.lock, 0);
 
     status = ucm_set_event_handler(params->ucm_events, params->ucm_event_priority,
                                    ucs_rcache_unmapped_callback, self);
@@ -729,6 +842,20 @@ static UCS_CLASS_CLEANUP_FUNC(ucs_rcache_t)
     ucs_rcache_check_inv_queue(self);
     ucs_rcache_purge(self);
 
+    if (self->lru.count > 0) {
+        ucs_assert(!ucs_list_is_empty(&self->lru.list));
+        ucs_warn("%lu regions remained on lru list, first region: %p",
+                 self->lru.count,
+                 ucs_list_head(&self->lru.list, ucs_rcache_region_t, lru_list));
+    } else {
+        ucs_assert(ucs_list_is_empty(&self->lru.list));
+    }
+
+    status = ucs_spinlock_destroy(&self->lru.lock);
+    if (status != UCS_OK) {
+        ucs_warn("ucs_spinlock_destroy() failed (%d)", status);
+    }
+
     ucs_mpool_cleanup(&self->inv_mp, 1);
     ucs_pgtable_cleanup(&self->pgtable);
     status = ucs_recursive_spinlock_destroy(&self->inv_lock);
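
The LRU hooks above piggyback on the existing public API: ucs_rcache_get() takes the region off the LRU list so a held region can never be evicted, while ucs_rcache_region_put() re-appends it at the tail, turning it into an eviction candidate once the new limits are exceeded. A minimal caller-side sketch of that flow (assuming the usual ucs_rcache_get() arguments of protection flags and a registration argument; the helper function itself is illustrative and not part of this patch):

    #include <ucs/memory/rcache.h>
    #include <sys/mman.h>

    static ucs_status_t use_registered_buffer(ucs_rcache_t *rcache, void *buffer,
                                              size_t length)
    {
        ucs_rcache_region_t *region;
        ucs_status_t status;

        /* Holding the region removes it from the LRU list, so it cannot be
         * evicted while it is in use */
        status = ucs_rcache_get(rcache, buffer, length, PROT_READ | PROT_WRITE,
                                NULL, &region);
        if (status != UCS_OK) {
            return status;
        }

        /* ... use the memory registration attached to the region ... */

        /* Releasing the region re-adds it at the LRU tail; it is now a
         * candidate for eviction once max_regions/max_size is exceeded */
        ucs_rcache_region_put(rcache, region);
        return UCS_OK;
    }
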
diff --git a/src/ucs/memory/rcache.h b/src/ucs/memory/rcache.h
index d9db909b227..995c79d51cb 100644
--- a/src/ucs/memory/rcache.h
+++ b/src/ucs/memory/rcache.h
@@ -113,17 +113,21 @@ struct ucs_rcache_params {
     const ucs_rcache_ops_t *ops;          /**< Memory operations functions */
     void                   *context;      /**< User-defined context that will be
                                                passed to mem_reg/mem_dereg */
+    unsigned long          max_regions;   /**< Maximal number of regions */
+    size_t                 max_size;      /**< Maximal total size of regions */
 };
 
 
 struct ucs_rcache_region {
     ucs_pgt_region_t       super;      /**< Base class - page table region */
-    ucs_list_link_t        list;       /**< List element */
+    ucs_list_link_t        lru_list;   /**< LRU list element */
+    ucs_list_link_t        tmp_list;   /**< Temp list element */
     volatile uint32_t      refcount;   /**< Reference count, including +1 if it's
                                             in the page table */
     ucs_status_t           status;     /**< Current status code */
     uint8_t                prot;       /**< Protection bits */
-    uint16_t               flags;      /**< Status flags. Protected by page table lock. */
+    uint8_t                flags;      /**< Status flags. Protected by page table lock. */
+    uint8_t                lru_flag;   /**< Whether the region is on the LRU list */
     uint64_t               priv;       /**< Used internally */
 };
 
diff --git a/src/ucs/memory/rcache_int.h b/src/ucs/memory/rcache_int.h
index 26bfb8e7a41..2a144adbc2c 100644
--- a/src/ucs/memory/rcache_int.h
+++ b/src/ucs/memory/rcache_int.h
@@ -7,8 +7,10 @@
 #ifndef UCS_REG_CACHE_INT_H_
 #define UCS_REG_CACHE_INT_H_
 
+#include
 #include
+
 
 /* Names of rcache stats counters */
 enum {
     UCS_RCACHE_GETS,                /* number of get operations */
@@ -42,7 +44,20 @@ struct ucs_rcache {
                                              since we cannot use regulat malloc().
                                              The backing storage is original mmap()
                                              which does not generate memory events */
-    char              *name;
+    unsigned long      num_regions; /**< Total number of managed regions */
+    size_t             total_size;  /**< Total size of registered memory */
+
+    struct {
+        ucs_spinlock_t  lock;  /**< Lock for this structure */
+        ucs_list_link_t list;  /**< List of regions, sorted by usage:
+                                    The head of the list is the least
+                                    recently used region, and the tail
+                                    is the most recently used region. */
+        unsigned long   count; /**< Number of regions on list */
+    } lru;
+
+    char              *name;        /**< Name for debug purposes */
+
     UCS_STATS_NODE_DECLARE(stats)
 };
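
Because max_regions and max_size are plain (non-optional) fields of ucs_rcache_params_t, every caller that creates an rcache now has to initialize them; ULONG_MAX and SIZE_MAX preserve the old unlimited behavior, as the xpmem change below does. A minimal creation sketch under those assumptions, mirroring the initializer used in test_rcache.cc further down (the ops table and context are caller-supplied placeholders, and the header providing ucs_get_page_size() is assumed):

    #include <ucs/memory/rcache.h>
    #include <ucs/sys/sys.h>    /* assumed location of ucs_get_page_size() */
    #include <stdint.h>

    static ucs_status_t create_limited_rcache(const ucs_rcache_ops_t *ops,
                                              void *context,
                                              ucs_rcache_t **rcache_p)
    {
        ucs_rcache_params_t params;

        params.region_struct_size = sizeof(ucs_rcache_region_t);
        params.alignment          = UCS_PGT_ADDR_ALIGN;
        params.max_alignment      = ucs_get_page_size();
        params.ucm_events         = UCM_EVENT_VM_UNMAPPED;
        params.ucm_event_priority = 1000;
        params.ops                = ops;
        params.context            = context;
        params.max_regions        = 1024;     /* start evicting beyond 1024 regions */
        params.max_size           = SIZE_MAX; /* no limit on total registered size */

        return ucs_rcache_create(&params, "limited_rcache", NULL, rcache_p);
    }
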
diff --git a/src/uct/base/uct_md.c b/src/uct/base/uct_md.c
index 1f7393d5ab4..bdafc06e168 100644
--- a/src/uct/base/uct_md.c
+++ b/src/uct/base/uct_md.c
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -39,6 +40,14 @@ ucs_config_field_t uct_md_config_rcache_table[] = {
      "between "UCS_PP_MAKE_STRING(UCS_PGT_ADDR_ALIGN)"and system page size",
      ucs_offsetof(uct_md_rcache_config_t, alignment), UCS_CONFIG_TYPE_UINT},
 
+    {"RCACHE_MAX_REGIONS", "inf",
+     "Maximal number of regions in the registration cache",
+     ucs_offsetof(uct_md_rcache_config_t, max_regions), UCS_CONFIG_TYPE_ULUNITS},
+
+    {"RCACHE_MAX_SIZE", "inf",
+     "Maximal total size of registration cache regions",
+     ucs_offsetof(uct_md_rcache_config_t, max_size), UCS_CONFIG_TYPE_MEMUNITS},
+
     {NULL}
 };
 
@@ -435,3 +444,12 @@ ucs_status_t uct_md_detect_memory_type(uct_md_h md, const void *addr, size_t len
 {
     return md->ops->detect_memory_type(md, addr, length, mem_type_p);
 }
+
+void uct_md_set_rcache_params(ucs_rcache_params_t *rcache_params,
+                              const uct_md_rcache_config_t *rcache_config)
+{
+    rcache_params->alignment          = rcache_config->alignment;
+    rcache_params->ucm_event_priority = rcache_config->event_prio;
+    rcache_params->max_regions        = rcache_config->max_regions;
+    rcache_params->max_size           = rcache_config->max_size;
+}
diff --git a/src/uct/base/uct_md.h b/src/uct/base/uct_md.h
index 804bf748422..7754c457e63 100644
--- a/src/uct/base/uct_md.h
+++ b/src/uct/base/uct_md.h
@@ -22,6 +22,8 @@ typedef struct uct_md_rcache_config {
     size_t               alignment;    /**< Force address alignment */
     unsigned             event_prio;   /**< Memory events priority */
     double               overhead;     /**< Lookup overhead estimation */
+    unsigned long        max_regions;  /**< Maximal number of rcache regions */
+    size_t               max_size;     /**< Maximal size of mapped memory */
 } uct_md_rcache_config_t;
 
 
@@ -151,6 +153,9 @@ ucs_status_t uct_md_stub_rkey_unpack(uct_component_t *component,
                                      const void *rkey_buffer, uct_rkey_t *rkey_p,
                                      void **handle_p);
 
+void uct_md_set_rcache_params(ucs_rcache_params_t *rcache_params,
+                              const uct_md_rcache_config_t *rcache_config);
+
 extern ucs_config_field_t uct_md_config_table[];
 
 #endif
diff --git a/src/uct/cuda/gdr_copy/gdr_copy_md.c b/src/uct/cuda/gdr_copy/gdr_copy_md.c
index c40e859897f..b6979780d02 100644
--- a/src/uct/cuda/gdr_copy/gdr_copy_md.c
+++ b/src/uct/cuda/gdr_copy/gdr_copy_md.c
@@ -385,11 +385,10 @@ uct_gdr_copy_md_open(uct_component_t *component, const char *md_name,
     }
 
     if (md_config->enable_rcache != UCS_NO) {
+        uct_md_set_rcache_params(&rcache_params, &md_config->rcache);
         rcache_params.region_struct_size = sizeof(uct_gdr_copy_rcache_region_t);
-        rcache_params.alignment          = md_config->rcache.alignment;
         rcache_params.max_alignment      = UCT_GDR_COPY_MD_RCACHE_DEFAULT_ALIGN;
         rcache_params.ucm_events         = UCM_EVENT_MEM_TYPE_FREE;
-        rcache_params.ucm_event_priority = md_config->rcache.event_prio;
         rcache_params.context            = md;
         rcache_params.ops                = &uct_gdr_copy_rcache_ops;
         status = ucs_rcache_create(&rcache_params, "gdr_copy", NULL, &md->rcache);
diff --git a/src/uct/ib/base/ib_md.c b/src/uct/ib/base/ib_md.c
index 3bb0cd11f24..6f474f7d2e0 100644
--- a/src/uct/ib/base/ib_md.c
+++ b/src/uct/ib/base/ib_md.c
@@ -1138,15 +1138,14 @@ uct_ib_md_parse_reg_methods(uct_ib_md_t *md, uct_md_attr_t *md_attr,
 
     for (i = 0; i < md_config->reg_methods.count; ++i) {
         if (!strcasecmp(md_config->reg_methods.rmtd[i], "rcache")) {
+            uct_md_set_rcache_params(&rcache_params, &md_config->rcache);
             rcache_params.region_struct_size = sizeof(ucs_rcache_region_t) +
                                                md->ops->memh_struct_size;
-            rcache_params.alignment          = md_config->rcache.alignment;
             rcache_params.max_alignment      = ucs_get_page_size();
             rcache_params.ucm_events         = UCM_EVENT_VM_UNMAPPED;
             if (md_attr->cap.reg_mem_types & ~UCS_BIT(UCS_MEMORY_TYPE_HOST)) {
                 rcache_params.ucm_events |= UCM_EVENT_MEM_TYPE_FREE;
             }
-            rcache_params.ucm_event_priority = md_config->rcache.event_prio;
             rcache_params.context            = md;
             rcache_params.ops                = &uct_ib_rcache_ops;
diff --git a/src/uct/sm/mm/xpmem/mm_xpmem.c b/src/uct/sm/mm/xpmem/mm_xpmem.c
index 318b7732d12..6c59ac29db4 100644
--- a/src/uct/sm/mm/xpmem/mm_xpmem.c
+++ b/src/uct/sm/mm/xpmem/mm_xpmem.c
@@ -267,6 +267,8 @@ uct_xpmem_rmem_add(xpmem_segid_t xsegid, uct_xpmem_remote_mem_t **rmem_p)
     rcache_params.ucm_event_priority = 0;
     rcache_params.ops                = &uct_xpmem_rcache_ops;
     rcache_params.context            = rmem;
+    rcache_params.max_regions        = ULONG_MAX;
+    rcache_params.max_size           = SIZE_MAX;
 
     status = ucs_rcache_create(&rcache_params, "xpmem_remote_mem",
                                ucs_stats_get_root(), &rmem->rcache);
diff --git a/src/uct/sm/scopy/knem/knem_md.c b/src/uct/sm/scopy/knem/knem_md.c
index 44654504237..af9a673cfc6 100644
--- a/src/uct/sm/scopy/knem/knem_md.c
+++ b/src/uct/sm/scopy/knem/knem_md.c
@@ -353,11 +353,10 @@ uct_knem_md_open(uct_component_t *component, const char *md_name,
     }
 
     if (md_config->rcache_enable != UCS_NO) {
+        uct_md_set_rcache_params(&rcache_params, &md_config->rcache);
         rcache_params.region_struct_size = sizeof(uct_knem_rcache_region_t);
-        rcache_params.alignment          = md_config->rcache.alignment;
         rcache_params.max_alignment      = ucs_get_page_size();
         rcache_params.ucm_events         = UCM_EVENT_VM_UNMAPPED;
-        rcache_params.ucm_event_priority = md_config->rcache.event_prio;
         rcache_params.context            = knem_md;
         rcache_params.ops                = &uct_knem_rcache_ops;
         status = ucs_rcache_create(&rcache_params, "knem rcache device",
diff --git a/test/gtest/ucs/test_rcache.cc b/test/gtest/ucs/test_rcache.cc
index 422624cf331..8e72689f24d 100644
--- a/test/gtest/ucs/test_rcache.cc
+++ b/test/gtest/ucs/test_rcache.cc
@@ -30,7 +30,9 @@ UCS_TEST_F(test_rcache_basic, create_fail) {
         UCS_BIT(30), /* non-existing event */
         1000,
         &ops,
-        NULL
+        NULL,
+        ULONG_MAX,
+        SIZE_MAX
     };
     ucs_rcache_t *rcache;
@@ -57,22 +59,10 @@ class test_rcache : public ucs::test {
 
     virtual void init() {
         ucs::test::init();
-        static const ucs_rcache_ops_t ops = {
-            mem_reg_cb,
-            mem_dereg_cb,
-            dump_region_cb
-        };
-        ucs_rcache_params_t params = {
-            sizeof(region),
-            UCS_PGT_ADDR_ALIGN,
-            ucs_get_page_size(),
-            UCM_EVENT_VM_UNMAPPED,
-            1000,
-            &ops,
-            reinterpret_cast<void*>(this)
-        };
+        ucs_rcache_params_t params = rcache_params();
         UCS_TEST_CREATE_HANDLE(ucs_rcache_t*, m_rcache, ucs_rcache_destroy,
-                               ucs_rcache_create, &params, "test", ucs_stats_get_root());
+                               ucs_rcache_create, &params, "test",
+                               ucs_stats_get_root());
     }
 
     virtual void cleanup() {
@@ -81,6 +71,26 @@ class test_rcache : public ucs::test {
         ucs::test::cleanup();
     }
 
+    virtual ucs_rcache_params_t rcache_params() {
+        static const ucs_rcache_ops_t ops = {
+            mem_reg_cb,
+            mem_dereg_cb,
+            dump_region_cb
+        };
+        ucs_rcache_params_t params = {
+            sizeof(region),
+            UCS_PGT_ADDR_ALIGN,
+            ucs_get_page_size(),
+            UCM_EVENT_VM_UNMAPPED,
+            1000,
+            &ops,
+            reinterpret_cast<void*>(this),
+            ULONG_MAX,
+            SIZE_MAX
+        };
+        return params;
+    }
+
     region *get(void *address, size_t length, int prot = PROT_READ|PROT_WRITE) {
         ucs_status_t status;
         ucs_rcache_region_t *r;
@@ -637,6 +647,99 @@ UCS_MT_TEST_F(test_rcache_no_register, merge_invalid_prot_slow, 5)
     munmap(mem, size1+size2);
 }
 
+class test_rcache_with_limit : public test_rcache {
+protected:
+    virtual ucs_rcache_params_t rcache_params() {
+        ucs_rcache_params_t params = test_rcache::rcache_params();
+        params.max_regions = 2;
+        params.max_size    = 1000;
+        params.alignment   = 16;
+        return params;
+    }
+
+    uint32_t get_put(void *ptr, size_t size) {
+        region *region = get(ptr, size);
+        uint32_t id    = region->id;
+        put(region);
+        return id;
+    }
+};
+
+UCS_TEST_F(test_rcache_with_limit, by_count) {
+    static const size_t size = 32;
+
+    /* First region will be added */
+    void *ptr1          = malloc(size);
+    uint32_t region1_id = get_put(ptr1, size);
+    EXPECT_EQ(1, m_rcache.get()->num_regions);
+
+    /* Second region will be added as well */
+    void *ptr2          = malloc(size);
+    uint32_t region2_id = get_put(ptr2, size);
+    EXPECT_EQ(2, m_rcache.get()->num_regions);
+
+    /* This time, something must be removed */
+    void *ptr3          = malloc(size);
+    uint32_t region3_id = get_put(ptr3, size);
+    EXPECT_EQ(2, m_rcache.get()->num_regions);
+
+    /* Second region should be kept by the LRU policy */
+    uint32_t region2_new_id = get_put(ptr2, size);
+    EXPECT_EQ(region2_id, region2_new_id);
+    EXPECT_EQ(2, m_rcache.get()->num_regions);
+
+    /* Third region should also be kept by the LRU policy */
+    uint32_t region3_new_id = get_put(ptr3, size);
+    EXPECT_EQ(region3_new_id, region3_id);
+    EXPECT_EQ(2, m_rcache.get()->num_regions);
+
+    /* First region should be removed by the LRU policy */
+    uint32_t region1_new_id = get_put(ptr1, size);
+    EXPECT_NE(region1_new_id, region1_id);
+    EXPECT_EQ(2, m_rcache.get()->num_regions);
+
+    free(ptr3);
+    free(ptr2);
+    free(ptr1);
+}
+
+UCS_TEST_F(test_rcache_with_limit, by_size) {
+    static const size_t size = 600;
+
+    /* First region will be added */
+    void *ptr1 = malloc(size);
+    get_put(ptr1, size);
+    EXPECT_EQ(1, m_rcache.get()->num_regions);
+
+    /* Second region will cause removal of the first region */
+    void *ptr2 = malloc(size);
+    get_put(ptr2, size);
+    EXPECT_EQ(1, m_rcache.get()->num_regions);
+
+    free(ptr2);
+    free(ptr1);
+}
+
+UCS_TEST_F(test_rcache_with_limit, by_size_inuse) {
+    static const size_t size = 600;
+
+    /* First region will be added */
+    void *ptr1      = malloc(size);
+    region *region1 = get(ptr1, size);
+    EXPECT_EQ(1, m_rcache.get()->num_regions);
+
+    /* Second region will NOT cause removal of the first region, since the
+     * first one is still in use */
+    void *ptr2 = malloc(size);
+    get_put(ptr2, size);
+    EXPECT_EQ(2, m_rcache.get()->num_regions);
+
+    put(region1);
+
+    free(ptr2);
+    free(ptr1);
+}
+
 #ifdef ENABLE_STATS
 class test_rcache_stats : public test_rcache {
 protected: