#ifndef _RTE_MEMPOOL_H_
#define _RTE_MEMPOOL_H_

#include <sys/queue.h>
#define RTE_MEMPOOL_HEADER_COOKIE1  0xbadbadbadadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_HEADER_COOKIE2  0xf2eef2eedadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_TRAILER_COOKIE  0xadd2e55badbadbadULL /**< Trailer cookie. */

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/** A structure that stores the mempool statistics (per-lcore). */
struct rte_mempool_debug_stats {
	uint64_t put_bulk;         /**< Number of puts. */
	uint64_t put_objs;         /**< Number of objects successfully put. */
	uint64_t get_success_bulk; /**< Successful allocation number. */
	uint64_t get_success_objs; /**< Objects successfully allocated. */
	uint64_t get_fail_bulk;    /**< Failed allocation number. */
	uint64_t get_fail_objs;    /**< Objects that failed to be allocated. */
} __rte_cache_aligned;
	/* Cache objects (a field of struct rte_mempool_cache); the array is
	 * sized to 3x the maximum so the cache can overflow in certain cases
	 * without being emptied needlessly. */
	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3];
#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
			      sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

/* Mempool memzone name format: "MP_<name>". */
#define RTE_MEMPOOL_MZ_FORMAT	RTE_MEMPOOL_MZ_PREFIX "%s"

#define MEMPOOL_PG_SHIFT_MAX	(sizeof(uintptr_t) * CHAR_BIT - 1)
#define MEMPOOL_PG_NUM_DEFAULT	1
#ifndef RTE_MEMPOOL_ALIGN
#define RTE_MEMPOOL_ALIGN	RTE_CACHE_LINE_SIZE
#endif

#define RTE_MEMPOOL_ALIGN_MASK	(RTE_MEMPOOL_ALIGN - 1)
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/** Mempool object trailer structure (if debug is enabled). */
struct rte_mempool_objtlr {
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	/** Per-lcore statistics (a field of struct rte_mempool). */
	struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
#endif
#define MEMPOOL_F_NO_SPREAD      0x0001 /**< Do not spread among memory channels. */
#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objects on cache lines. */
#define MEMPOOL_F_SP_PUT         0x0004 /**< Default put is "single-producer". */
#define MEMPOOL_F_SC_GET         0x0008 /**< Default get is "single-consumer". */
#define MEMPOOL_F_POOL_CREATED   0x0010 /**< Internal: pool is created. */
#define MEMPOOL_F_NO_PHYS_CONTIG 0x0020 /**< Don't need physically contiguous objects. */
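/*
 * Usage sketch (illustrative, not part of this header): creating a pool whose
 * default put path is single-producer and default get path is single-consumer,
 * so the heavier multi-producer/multi-consumer ring operations are avoided.
 * The name, element size and counts below are example values only.
 */
static inline struct rte_mempool *
example_create_pool(void)
{
	return rte_mempool_create("example_pool",
			8192,          /* number of elements */
			2048,          /* element size in bytes */
			256,           /* per-lcore cache size */
			0,             /* private data size */
			NULL, NULL,    /* no pool constructor */
			NULL, NULL,    /* no per-object constructor */
			SOCKET_ID_ANY, /* any NUMA socket */
			MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
}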
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __MEMPOOL_STAT_ADD(mp, name, n) do {                    \
		unsigned __lcore_id = rte_lcore_id();           \
		if (__lcore_id < RTE_MAX_LCORE) {               \
			mp->stats[__lcore_id].name##_objs += n; \
			mp->stats[__lcore_id].name##_bulk += 1; \
		}                                               \
	} while(0)
#else
#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
#endif
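/*
 * Usage sketch (illustrative): with RTE_LIBRTE_MEMPOOL_DEBUG enabled, the
 * per-lcore counters accumulated by __MEMPOOL_STAT_ADD() are reported by
 * rte_mempool_dump() (FILE/stdout come from <stdio.h>).
 */
static inline void
example_dump_pool_stats(struct rte_mempool *mp)
{
	rte_mempool_dump(stdout, mp); /* includes put/get statistics in debug builds */
}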
#define MEMPOOL_HEADER_SIZE(mp, cs) \
	(sizeof(*(mp)) + (((cs) == 0) ? 0 : \
	(sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))
static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj)
void rte_mempool_check_cookies(const struct rte_mempool *mp,
	void * const *obj_table_const, unsigned n, int free);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __mempool_check_cookies(mp, obj_table_const, n, free) \
	rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
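/*
 * Usage sketch (illustrative): beyond the per-access cookie checks above, a
 * full consistency check of a pool can be requested explicitly at any time
 * with rte_mempool_audit().
 */
static inline void
example_audit_pool(struct rte_mempool *mp)
{
	rte_mempool_audit(mp); /* verifies object headers/trailers and cache state */
}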
#define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */

typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);
typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
		void * const *obj_table, unsigned int n);

typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
		void **obj_table, unsigned int n);
#define RTE_MEMPOOL_MAX_OPS_IDX 16  /**< Max registered ops structs */

/** The table of registered mempool ops structs. */
struct rte_mempool_ops_table {
static inline struct rte_mempool_ops *
rte_mempool_get_ops(int ops_index)
/** @internal Wrapper for the mempool_ops dequeue callback. */
static inline int
rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
		void **obj_table, unsigned n)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	return ops->dequeue(mp, obj_table, n);
}
/** @internal Wrapper for the mempool_ops enqueue callback. */
static inline int
rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	return ops->enqueue(mp, obj_table, n);
}
unsigned
rte_mempool_ops_get_count(const struct rte_mempool *mp);
#define MEMPOOL_REGISTER_OPS(ops)					\
	void mp_hdlr_init_##ops(void);					\
	void __attribute__((constructor, used)) mp_hdlr_init_##ops(void)\
	{								\
		rte_mempool_register_ops(&ops);				\
	}
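/*
 * Usage sketch (illustrative): a pool can be created empty, bound by name to a
 * registered ops implementation, and only then populated. "ring_mp_mc" is
 * assumed here to be an available registered ops name; the sizes are example
 * values.
 */
static inline struct rte_mempool *
example_pool_with_ops(void)
{
	struct rte_mempool *mp;

	mp = rte_mempool_create_empty("ops_pool", 4096, 2048,
			256, 0, SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return NULL;

	/* Select the backing implementation registered under this name. */
	if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) < 0 ||
	    rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}
	return mp;
}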
typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *obj, unsigned obj_idx);
		const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);

		const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
static inline void __attribute__((always_inline))
rte_mempool_cache_flush(struct rte_mempool_cache *cache, struct rte_mempool *mp)
{
	rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
	cache->len = 0;
}

static inline struct rte_mempool_cache *__attribute__((always_inline))
rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
{
	if (mp->cache_size == 0)
		return NULL;
	if (lcore_id >= RTE_MAX_LCORE)
		return NULL;
	return &mp->local_cache[lcore_id];
}
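/*
 * Usage sketch (illustrative): a thread without a default per-lcore cache
 * (e.g. a non-EAL thread) can create an external cache and pass it to the
 * generic get/put calls explicitly. The cache and burst sizes are example
 * values.
 */
static inline int
example_external_cache(struct rte_mempool *mp)
{
	struct rte_mempool_cache *cache;
	void *objs[32];
	int ret;

	cache = rte_mempool_cache_create(256, SOCKET_ID_ANY);
	if (cache == NULL)
		return -1;

	ret = rte_mempool_generic_get(mp, objs, 32, cache, 0);
	if (ret == 0)
		rte_mempool_generic_put(mp, objs, 32, cache, 0);

	/* Return any locally cached objects to the pool before freeing. */
	rte_mempool_cache_flush(cache, mp);
	rte_mempool_cache_free(cache);
	return ret;
}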
static inline void __attribute__((always_inline))
__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
		      unsigned n, struct rte_mempool_cache *cache, int flags)
{
	void **cache_objs;

	/* Increment stat now; adding to a mempool always succeeds. */
	__MEMPOOL_STAT_ADD(mp, put, n);

	/* No cache, single-producer put, or request too big for the cache. */
	if (unlikely(cache == NULL || flags & MEMPOOL_F_SP_PUT ||
		     n > RTE_MEMPOOL_CACHE_MAX_SIZE))
		goto ring_enqueue;

	cache_objs = &cache->objs[cache->len];

	/* Add the objects to the cache. */
	rte_memcpy(&cache_objs[0], obj_table, sizeof(void *) * n);
	cache->len += n;

	/* Spill anything above the flush threshold back to the backing pool. */
	if (cache->len >= cache->flushthresh) {
		rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size],
				cache->len - cache->size);
		cache->len = cache->size;
	}

	return;

ring_enqueue:
	/* Push the objects to the backing pool directly. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	if (rte_mempool_ops_enqueue_bulk(mp, obj_table, n) < 0)
		rte_panic("cannot put objects in mempool\n");
#else
	rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
#endif
}
static inline void __attribute__((always_inline))
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
			unsigned n, struct rte_mempool_cache *cache, int flags)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_generic_put(mp, obj_table, n, cache, flags);
}
/*
 * (Always-inline put wrappers follow in the full header: rte_mempool_put_bulk(),
 * rte_mempool_put() and the deprecated mp_/sp_ variants; their signatures are
 * listed in the index below.)
 */
static inline int __attribute__((always_inline))
__mempool_generic_get(struct rte_mempool *mp, void **obj_table,
		      unsigned n, struct rte_mempool_cache *cache, int flags)
{
	int ret;
	uint32_t index, len;
	void **cache_objs;

	/* No cache, single-consumer get, or request bigger than the cache. */
	if (unlikely(cache == NULL || flags & MEMPOOL_F_SC_GET ||
		     n >= cache->size))
		goto ring_dequeue;

	cache_objs = cache->objs;
	if (cache->len < n) {
		/* Backfill the cache first, then serve from it. */
		uint32_t req = n + (cache->size - cache->len);

		ret = rte_mempool_ops_dequeue_bulk(mp,
				&cache->objs[cache->len], req);
		if (unlikely(ret < 0))
			goto ring_dequeue;
		cache->len += req;
	}

	/* Serve the request from the cache, most recently cached first. */
	for (index = 0, len = cache->len - 1; index < n;
	     ++index, len--, obj_table++)
		*obj_table = cache_objs[len];
	cache->len -= n;

	__MEMPOOL_STAT_ADD(mp, get_success, n);
	return 0;

ring_dequeue:
	/* Get the objects from the backing pool directly. */
	ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);
	if (ret < 0)
		__MEMPOOL_STAT_ADD(mp, get_fail, n);
	else
		__MEMPOOL_STAT_ADD(mp, get_success, n);
	return ret;
}
static inline int __attribute__((always_inline))
rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n,
			struct rte_mempool_cache *cache, int flags)
{
	int ret = __mempool_generic_get(mp, obj_table, n, cache, flags);
	if (ret == 0)
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}
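/*
 * Usage sketch (illustrative): a typical burst allocation pattern built on the
 * thin wrappers declared below (rte_mempool_get_bulk()/rte_mempool_put_bulk()),
 * which use the caller's default per-lcore cache and the pool's own flags.
 */
static inline void
example_burst(struct rte_mempool *mp)
{
	void *burst[32];

	/* Bulk get is all-or-nothing: on failure no object is allocated. */
	if (rte_mempool_get_bulk(mp, burst, 32) < 0)
		return;

	/* ... use the 32 objects ... */

	rte_mempool_put_bulk(mp, burst, 32);
}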
/*
 * (Always-inline get wrappers and count helpers follow in the full header:
 * rte_mempool_get_bulk(), rte_mempool_get(), the deprecated mc_/sc_ variants
 * and rte_mempool_free_count(); their signatures are listed in the index below.)
 */
ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num,
	size_t total_elt_sz, const phys_addr_t paddr[], uint32_t pg_num,
	uint32_t pg_shift);
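/*
 * Usage sketch (illustrative): estimating how much memory a pool of elt_num
 * objects of a given total element size would need when backed by 2 MB pages
 * (page shift 21).
 */
static inline size_t
example_xmem_size(uint32_t elt_num, size_t total_elt_sz)
{
	return rte_mempool_xmem_size(elt_num, total_elt_sz, 21);
}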
/* Index of related declarations: */
__rte_deprecated unsigned rte_mempool_count(const struct rte_mempool *mp)
static int rte_mempool_get(struct rte_mempool *mp, void **obj_p)
struct rte_mempool * rte_mempool_lookup(const char *name)
struct rte_mempool_cache * rte_mempool_cache_create(uint32_t size, int socket_id)
static __rte_deprecated int rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
static __rte_deprecated unsigned rte_mempool_free_count(const struct rte_mempool *mp)
static struct rte_mempool_cache * rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
uint32_t rte_mempool_mem_iter(struct rte_mempool *mp, rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg)
STAILQ_ENTRY(rte_mempool_objhdr) next
const struct rte_memzone * mz
static phys_addr_t rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
static __rte_deprecated int rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
void rte_mempool_list_dump(FILE *f)
void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr, void *opaque)
rte_mempool_alloc_t alloc
struct rte_mempool * rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size, unsigned cache_size, unsigned private_data_size, rte_mempool_ctor_t *mp_init, void *mp_init_arg, rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, int socket_id, unsigned flags, void *vaddr, const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
static int rte_mempool_empty(const struct rte_mempool *mp)
static __rte_deprecated void rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
static __rte_deprecated int rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
rte_mempool_memchunk_free_cb_t * free_cb
void (rte_mempool_ctor_t)(struct rte_mempool *, void *)
char name[RTE_MEMPOOL_OPS_NAMESIZE]
#define MEMPOOL_HEADER_SIZE(mp, cs)
unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp)
static int rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
unsigned int rte_mempool_avail_count(const struct rte_mempool *mp)
struct rte_mempool_objhdr_list elt_list
int (*rte_mempool_dequeue_t)(struct rte_mempool *mp, void **obj_table, unsigned int n)
#define RTE_PTR_ADD(ptr, x)
void rte_mempool_cache_free(struct rte_mempool_cache *cache)
uint32_t rte_mempool_obj_iter(struct rte_mempool *mp, rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg)
int rte_mempool_register_ops(const struct rte_mempool_ops *ops)
int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr, const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift, rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
int rte_mempool_populate_default(struct rte_mempool *mp)
static void rte_mempool_cache_flush(struct rte_mempool_cache *cache, struct rte_mempool *mp)
#define RTE_MEMPOOL_OPS_NAMESIZE
STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr)
struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX]
rte_mempool_get_count get_count
static int rte_mempool_full(const struct rte_mempool *mp)
static unsigned rte_lcore_id(void)
unsigned int rte_mempool_in_use_count(const struct rte_mempool *mp)
int rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name, void *pool_config)
static __rte_deprecated int rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
void (rte_mempool_obj_cb_t)(struct rte_mempool *mp, void *opaque, void *obj, unsigned obj_idx)
static void rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table, unsigned n)
uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags, struct rte_mempool_objsz *sz)
void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]
static __rte_deprecated void rte_mempool_mp_put_bulk(struct rte_mempool *mp, void *const *obj_table, unsigned n)
int rte_mempool_populate_virt(struct rte_mempool *mp, char *addr, size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
void (rte_mempool_mem_cb_t)(struct rte_mempool *mp, void *opaque, struct rte_mempool_memhdr *memhdr, unsigned mem_idx)
void rte_mempool_audit(struct rte_mempool *mp)
struct rte_mempool * rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size, unsigned cache_size, unsigned private_data_size, int socket_id, unsigned flags)
int (*rte_mempool_enqueue_t)(struct rte_mempool *mp, void *const *obj_table, unsigned int n)
unsigned private_data_size
void rte_mempool_dump(FILE *f, struct rte_mempool *mp)
int rte_mempool_populate_anon(struct rte_mempool *mp)
struct rte_mempool_cache * local_cache
STAILQ_ENTRY(rte_mempool_memhdr) next
static int rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n, struct rte_mempool_cache *cache, int flags)
rte_mempool_dequeue_t dequeue
void (*rte_mempool_free_t)(struct rte_mempool *mp)
int (*rte_mempool_alloc_t)(struct rte_mempool *mp)
ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t total_elt_sz, const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
struct rte_mempool_memhdr_list mem_list
struct rte_mempool * rte_mempool_create(const char *name, unsigned n, unsigned elt_size, unsigned cache_size, unsigned private_data_size, rte_mempool_ctor_t *mp_init, void *mp_init_arg, rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, int socket_id, unsigned flags)
#define RTE_PTR_SUB(ptr, x)
#define __rte_cache_aligned
size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift)
static struct rte_mempool * rte_mempool_from_obj(void *obj)
static __rte_deprecated void rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
static void * rte_memcpy(void *dst, const void *src, size_t n)
static __rte_deprecated void rte_mempool_sp_put_bulk(struct rte_mempool *mp, void *const *obj_table, unsigned n)
static void rte_mempool_generic_put(struct rte_mempool *mp, void *const *obj_table, unsigned n, struct rte_mempool_cache *cache, int flags)
void rte_mempool_free(struct rte_mempool *mp)
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg), void *arg)
static void * rte_mempool_get_priv(struct rte_mempool *mp)
char name[RTE_MEMZONE_NAMESIZE]
static void rte_mempool_put(struct rte_mempool *mp, void *obj)
int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr, phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
#define RTE_MEMPOOL_MAX_OPS_IDX
rte_mempool_enqueue_t enqueue
#define RTE_MEMZONE_NAMESIZE
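/*
 * Usage sketch (illustrative): looking up an existing pool by name and
 * inspecting its fill level with the counters listed above (printf from
 * <stdio.h> is used only for the example output).
 */
static inline void
example_inspect_pool(const char *name)
{
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp == NULL)
		return;

	printf("%s: %u available, %u in use, full=%d, empty=%d\n",
	       mp->name, rte_mempool_avail_count(mp),
	       rte_mempool_in_use_count(mp),
	       rte_mempool_full(mp), rte_mempool_empty(mp));
}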