author     Changqing Li <changqing.li@windriver.com>   2021-10-29 09:22:21 +0800
committer  Armin Kuster <akuster808@gmail.com>         2021-10-28 21:14:44 -0700
commit     43004de1bf2fb80732a7c7c0af5cb84420568b66 (patch)
tree       65c81cdeea836d7be160a700b066d0fc171f3716
parent     b7e32131a592520aeac6b6c00aec141818e96168 (diff)
download   meta-openembedded-43004de1bf2fb80732a7c7c0af5cb84420568b66.tar.gz
redis: fix several CVEs
Fixes CVEs:
CVE-2021-32626
CVE-2021-32627
CVE-2021-32628
CVE-2021-32675
CVE-2021-32687
CVE-2021-32762
CVE-2021-41099
Signed-off-by: Changqing Li <changqing.li@windriver.com>
Signed-off-by: Armin Kuster <akuster808@gmail.com>
7 files changed, 1338 insertions, 0 deletions
diff --git a/meta-oe/recipes-extended/redis/redis/CVE-2021-32626.patch b/meta-oe/recipes-extended/redis/redis/CVE-2021-32626.patch
new file mode 100644
index 0000000000..0cfc12b3d9
--- /dev/null
+++ b/meta-oe/recipes-extended/redis/redis/CVE-2021-32626.patch
@@ -0,0 +1,148 @@
1 | From 6ce827254484fd850240549c98c74bca77980cc0 Mon Sep 17 00:00:00 2001 | ||
2 | From: "meir@redislabs.com" <meir@redislabs.com> | ||
3 | Date: Sun, 13 Jun 2021 14:27:18 +0300 | ||
4 | Subject: [PATCH] Fix invalid memory write on lua stack overflow | ||
5 | {CVE-2021-32626} | ||
6 | MIME-Version: 1.0 | ||
7 | Content-Type: text/plain; charset=UTF-8 | ||
8 | Content-Transfer-Encoding: 8bit | ||
9 | |||
10 | When LUA call our C code, by default, the LUA stack has room for 20 | ||
11 | elements. In most cases, this is more than enough but sometimes it's not | ||
12 | and the caller must verify the LUA stack size before he pushes elements. | ||
13 | |||
14 | On 3 places in the code, there was no verification of the LUA stack size. | ||
15 | On specific inputs this missing verification could have lead to invalid | ||
16 | memory write: | ||
17 | 1. On 'luaReplyToRedisReply', one might return a nested reply that will | ||
18 | explode the LUA stack. | ||
19 | 2. On 'redisProtocolToLuaType', the Redis reply might be deep enough | ||
20 | to explode the LUA stack (notice that currently there is no such | ||
21 | command in Redis that returns such a nested reply, but modules might | ||
22 | do it) | ||
23 | 3. On 'ldbRedis', one might give a command with enough arguments to | ||
24 | explode the LUA stack (all the arguments will be pushed to the LUA | ||
25 | stack) | ||
26 | |||
27 | This commit is solving all those 3 issues by calling 'lua_checkstack' and | ||
28 | verify that there is enough room in the LUA stack to push elements. In | ||
29 | case 'lua_checkstack' returns an error (there is not enough room in the | ||
30 | LUA stack and it's not possible to increase the stack), we will do the | ||
31 | following: | ||
32 | 1. On 'luaReplyToRedisReply', we will return an error to the user. | ||
33 | 2. On 'redisProtocolToLuaType' we will exit with panic (we assume this | ||
34 | scenario is rare because it can only happen with a module). | ||
35 | 3. On 'ldbRedis', we return an error. | ||
36 | |||
37 | CVE: CVE-2021-32626 | ||
38 | Upstream-Status: Backport[https://github.com/redis/redis/commit/666ed7facf4524bf6d19b11b20faa2cf93fdf591] | ||
39 | |||
40 | Signed-off-by: Changqing Li <changqing.li@windriver.com> | ||
41 | --- | ||
42 | src/scripting.c | 41 +++++++++++++++++++++++++++++++++++++++++ | ||
43 | 1 file changed, 41 insertions(+) | ||
44 | |||
45 | diff --git a/src/scripting.c b/src/scripting.c | ||
46 | index 299e608..81c88fb 100644 | ||
47 | --- a/src/scripting.c | ||
48 | +++ b/src/scripting.c | ||
49 | @@ -128,6 +128,16 @@ void sha1hex(char *digest, char *script, size_t len) { | ||
50 | */ | ||
51 | |||
52 | char *redisProtocolToLuaType(lua_State *lua, char* reply) { | ||
53 | + | ||
54 | + if (!lua_checkstack(lua, 5)) { | ||
55 | + /* | ||
56 | + * Increase the Lua stack if needed, to make sure there is enough room | ||
57 | + * to push 5 elements to the stack. On failure, exit with panic. | ||
58 | + * Notice that we need, in the worst case, 5 elements because redisProtocolToLuaType_Aggregate | ||
59 | + * might push 5 elements to the Lua stack.*/ | ||
60 | + serverPanic("lua stack limit reach when parsing redis.call reply"); | ||
61 | + } | ||
62 | + | ||
63 | char *p = reply; | ||
64 | |||
65 | switch(*p) { | ||
66 | @@ -220,6 +230,11 @@ char *redisProtocolToLuaType_Aggregate(lua_State *lua, char *reply, int atype) { | ||
67 | if (atype == '%') { | ||
68 | p = redisProtocolToLuaType(lua,p); | ||
69 | } else { | ||
70 | + if (!lua_checkstack(lua, 1)) { | ||
71 | + /* Notice that here we need to check the stack again because the recursive | ||
72 | + * call to redisProtocolToLuaType might have use the room allocated in the stack */ | ||
73 | + serverPanic("lua stack limit reach when parsing redis.call reply"); | ||
74 | + } | ||
75 | lua_pushboolean(lua,1); | ||
76 | } | ||
77 | lua_settable(lua,-3); | ||
78 | @@ -339,6 +354,17 @@ void luaSortArray(lua_State *lua) { | ||
79 | /* Reply to client 'c' converting the top element in the Lua stack to a | ||
80 | * Redis reply. As a side effect the element is consumed from the stack. */ | ||
81 | void luaReplyToRedisReply(client *c, lua_State *lua) { | ||
82 | + | ||
83 | + if (!lua_checkstack(lua, 4)) { | ||
84 | + /* Increase the Lua stack if needed to make sure there is enough room | ||
85 | + * to push 4 elements to the stack. On failure, return error. | ||
86 | + * Notice that we need, in the worst case, 4 elements because returning a map might | ||
87 | + * require push 4 elements to the Lua stack.*/ | ||
88 | + addReplyErrorFormat(c, "reached lua stack limit"); | ||
89 | + lua_pop(lua,1); // pop the element from the stack | ||
90 | + return; | ||
91 | + } | ||
92 | + | ||
93 | int t = lua_type(lua,-1); | ||
94 | |||
95 | switch(t) { | ||
96 | @@ -362,6 +388,7 @@ void luaReplyToRedisReply(client *c, lua_State *lua) { | ||
97 | * field. */ | ||
98 | |||
99 | /* Handle error reply. */ | ||
100 | + // we took care of the stack size on function start | ||
101 | lua_pushstring(lua,"err"); | ||
102 | lua_gettable(lua,-2); | ||
103 | t = lua_type(lua,-1); | ||
104 | @@ -404,6 +431,7 @@ void luaReplyToRedisReply(client *c, lua_State *lua) { | ||
105 | if (t == LUA_TTABLE) { | ||
106 | int maplen = 0; | ||
107 | void *replylen = addReplyDeferredLen(c); | ||
108 | + /* we took care of the stack size on function start */ | ||
109 | lua_pushnil(lua); /* Use nil to start iteration. */ | ||
110 | while (lua_next(lua,-2)) { | ||
111 | /* Stack now: table, key, value */ | ||
112 | @@ -426,6 +454,7 @@ void luaReplyToRedisReply(client *c, lua_State *lua) { | ||
113 | if (t == LUA_TTABLE) { | ||
114 | int setlen = 0; | ||
115 | void *replylen = addReplyDeferredLen(c); | ||
116 | + /* we took care of the stack size on function start */ | ||
117 | lua_pushnil(lua); /* Use nil to start iteration. */ | ||
118 | while (lua_next(lua,-2)) { | ||
119 | /* Stack now: table, key, true */ | ||
120 | @@ -445,6 +474,7 @@ void luaReplyToRedisReply(client *c, lua_State *lua) { | ||
121 | void *replylen = addReplyDeferredLen(c); | ||
122 | int j = 1, mbulklen = 0; | ||
123 | while(1) { | ||
124 | + /* we took care of the stack size on function start */ | ||
125 | lua_pushnumber(lua,j++); | ||
126 | lua_gettable(lua,-2); | ||
127 | t = lua_type(lua,-1); | ||
128 | @@ -2546,6 +2576,17 @@ void ldbEval(lua_State *lua, sds *argv, int argc) { | ||
129 | void ldbRedis(lua_State *lua, sds *argv, int argc) { | ||
130 | int j, saved_rc = server.lua_replicate_commands; | ||
131 | |||
132 | + if (!lua_checkstack(lua, argc + 1)) { | ||
133 | + /* Increase the Lua stack if needed to make sure there is enough room | ||
134 | + * to push 'argc + 1' elements to the stack. On failure, return error. | ||
135 | + * Notice that we need, in worst case, 'argc + 1' elements because we push all the arguments | ||
136 | + * given by the user (without the first argument) and we also push the 'redis' global table and | ||
137 | + * 'redis.call' function so: | ||
138 | + * (1 (redis table)) + (1 (redis.call function)) + (argc - 1 (all arguments without the first)) = argc + 1*/ | ||
139 | + ldbLogRedisReply("max lua stack reached"); | ||
140 | + return; | ||
141 | + } | ||
142 | + | ||
143 | lua_getglobal(lua,"redis"); | ||
144 | lua_pushstring(lua,"call"); | ||
145 | lua_gettable(lua,-2); /* Stack: redis, redis.call */ | ||
146 | -- | ||
147 | 2.17.1 | ||
148 | |||
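The patch above hinges on one pattern: call lua_checkstack() before pushing anything beyond the default 20 Lua stack slots, and bail out when the stack cannot grow. The standalone sketch below is not part of the patch; it only illustrates that guard against the stock Lua C API. The helper name push_many and the build command are illustrative assumptions.

```c
/* Minimal sketch of the lua_checkstack() guard used by the CVE-2021-32626 fix.
 * Assumed build: cc demo.c -llua -lm -o demo (adjust include/lib paths). */
#include <stdio.h>
#include <lua.h>
#include <lauxlib.h>

/* Push 'n' integers only if the stack can be grown to hold them. */
static int push_many(lua_State *L, int n) {
    if (!lua_checkstack(L, n)) {
        /* Stack cannot grow: report an error instead of writing past the end. */
        fprintf(stderr, "lua stack limit reached, refusing to push %d items\n", n);
        return -1;
    }
    for (int i = 0; i < n; i++)
        lua_pushinteger(L, i);
    return 0;
}

int main(void) {
    lua_State *L = luaL_newstate();
    if (push_many(L, 100) == 0)           /* well above the default 20 slots */
        printf("stack top after pushes: %d\n", lua_gettop(L));
    lua_close(L);
    return 0;
}
```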
diff --git a/meta-oe/recipes-extended/redis/redis/CVE-2021-32627-CVE-2021-32628.patch b/meta-oe/recipes-extended/redis/redis/CVE-2021-32627-CVE-2021-32628.patch
new file mode 100644
index 0000000000..3c60a3e678
--- /dev/null
+++ b/meta-oe/recipes-extended/redis/redis/CVE-2021-32627-CVE-2021-32628.patch
@@ -0,0 +1,873 @@
1 | From 2775a3526e3e8bb040e72995231632c801977395 Mon Sep 17 00:00:00 2001 | ||
2 | From: Oran Agra <oran@redislabs.com> | ||
3 | Date: Thu, 3 Jun 2021 12:10:02 +0300 | ||
4 | Subject: [PATCH] Fix ziplist and listpack overflows and truncations | ||
5 | (CVE-2021-32627, CVE-2021-32628) | ||
6 | |||
7 | - fix possible heap corruption in ziplist and listpack resulting by trying to | ||
8 | allocate more than the maximum size of 4GB. | ||
9 | - prevent ziplist (hash and zset) from reaching size of above 1GB, will be | ||
10 | converted to HT encoding, that's not a useful size. | ||
11 | - prevent listpack (stream) from reaching size of above 1GB. | ||
12 | - XADD will start a new listpack if the new record may cause the previous | ||
13 | listpack to grow over 1GB. | ||
14 | - XADD will respond with an error if a single stream record is over 1GB | ||
15 | - List type (ziplist in quicklist) was truncating strings that were over 4GB, | ||
16 | now it'll respond with an error. | ||
17 | |||
18 | CVE: CVE-2021-32627,CVE-2021-32628 | ||
19 | Upstream-Status: Backport[https://github.com/redis/redis/commit/f6a40570fa63d5afdd596c78083d754081d80ae3] | ||
20 | |||
21 | Signed-off-by: Changqing Li <changqing.li@windriver.com> | ||
22 | |||
23 | --- | ||
24 | src/geo.c | 5 +- | ||
25 | src/listpack.c | 2 +- | ||
26 | src/module.c | 6 +- | ||
27 | src/quicklist.c | 16 +++- | ||
28 | src/rdb.c | 45 +++++++---- | ||
29 | src/server.h | 2 +- | ||
30 | src/t_hash.c | 13 +++- | ||
31 | src/t_list.c | 29 +++++++ | ||
32 | src/t_stream.c | 48 +++++++++--- | ||
33 | src/t_zset.c | 62 +++++++++------ | ||
34 | src/ziplist.c | 17 ++++- | ||
35 | src/ziplist.h | 1 + | ||
36 | tests/unit/violations.tcl | 156 ++++++++++++++++++++++++++++++++++++++ | ||
37 | 13 files changed, 341 insertions(+), 61 deletions(-) | ||
38 | create mode 100644 tests/unit/violations.tcl | ||
39 | |||
40 | diff --git a/src/geo.c b/src/geo.c | ||
41 | index 7c75738a2..893f78a7e 100644 | ||
42 | --- a/src/geo.c | ||
43 | +++ b/src/geo.c | ||
44 | @@ -770,7 +770,7 @@ void georadiusGeneric(client *c, int srcKeyIndex, int flags) { | ||
45 | robj *zobj; | ||
46 | zset *zs; | ||
47 | int i; | ||
48 | - size_t maxelelen = 0; | ||
49 | + size_t maxelelen = 0, totelelen = 0; | ||
50 | |||
51 | if (returned_items) { | ||
52 | zobj = createZsetObject(); | ||
53 | @@ -785,13 +785,14 @@ void georadiusGeneric(client *c, int srcKeyIndex, int flags) { | ||
54 | size_t elelen = sdslen(gp->member); | ||
55 | |||
56 | if (maxelelen < elelen) maxelelen = elelen; | ||
57 | + totelelen += elelen; | ||
58 | znode = zslInsert(zs->zsl,score,gp->member); | ||
59 | serverAssert(dictAdd(zs->dict,gp->member,&znode->score) == DICT_OK); | ||
60 | gp->member = NULL; | ||
61 | } | ||
62 | |||
63 | if (returned_items) { | ||
64 | - zsetConvertToZiplistIfNeeded(zobj,maxelelen); | ||
65 | + zsetConvertToZiplistIfNeeded(zobj,maxelelen,totelelen); | ||
66 | setKey(c,c->db,storekey,zobj); | ||
67 | decrRefCount(zobj); | ||
68 | notifyKeyspaceEvent(NOTIFY_ZSET,flags & GEOSEARCH ? "geosearchstore" : "georadiusstore",storekey, | ||
69 | diff --git a/src/listpack.c b/src/listpack.c | ||
70 | index ee256bad3..27622d4a5 100644 | ||
71 | --- a/src/listpack.c | ||
72 | +++ b/src/listpack.c | ||
73 | @@ -313,7 +313,7 @@ int lpEncodeGetType(unsigned char *ele, uint32_t size, unsigned char *intenc, ui | ||
74 | } else { | ||
75 | if (size < 64) *enclen = 1+size; | ||
76 | else if (size < 4096) *enclen = 2+size; | ||
77 | - else *enclen = 5+size; | ||
78 | + else *enclen = 5+(uint64_t)size; | ||
79 | return LP_ENCODING_STRING; | ||
80 | } | ||
81 | } | ||
82 | diff --git a/src/module.c b/src/module.c | ||
83 | index bf6580a60..adca9dc9c 100644 | ||
84 | --- a/src/module.c | ||
85 | +++ b/src/module.c | ||
86 | @@ -3319,6 +3319,7 @@ int RM_HashGet(RedisModuleKey *key, int flags, ...) { | ||
87 | * - EDOM if the given ID was 0-0 or not greater than all other IDs in the | ||
88 | * stream (only if the AUTOID flag is unset) | ||
89 | * - EFBIG if the stream has reached the last possible ID | ||
90 | + * - ERANGE if the elements are too large to be stored. | ||
91 | */ | ||
92 | int RM_StreamAdd(RedisModuleKey *key, int flags, RedisModuleStreamID *id, RedisModuleString **argv, long numfields) { | ||
93 | /* Validate args */ | ||
94 | @@ -3362,8 +3363,9 @@ int RM_StreamAdd(RedisModuleKey *key, int flags, RedisModuleStreamID *id, RedisM | ||
95 | use_id_ptr = &use_id; | ||
96 | } | ||
97 | if (streamAppendItem(s, argv, numfields, &added_id, use_id_ptr) == C_ERR) { | ||
98 | - /* ID not greater than all existing IDs in the stream */ | ||
99 | - errno = EDOM; | ||
100 | + /* Either the ID not greater than all existing IDs in the stream, or | ||
101 | + * the elements are too large to be stored. either way, errno is already | ||
102 | + * set by streamAppendItem. */ | ||
103 | return REDISMODULE_ERR; | ||
104 | } | ||
105 | /* Postponed signalKeyAsReady(). Done implicitly by moduleCreateEmptyKey() | ||
106 | diff --git a/src/quicklist.c b/src/quicklist.c | ||
107 | index 5a1e41dcc..a9f8b43b1 100644 | ||
108 | --- a/src/quicklist.c | ||
109 | +++ b/src/quicklist.c | ||
110 | @@ -45,11 +45,16 @@ | ||
111 | #define REDIS_STATIC static | ||
112 | #endif | ||
113 | |||
114 | -/* Optimization levels for size-based filling */ | ||
115 | +/* Optimization levels for size-based filling. | ||
116 | + * Note that the largest possible limit is 16k, so even if each record takes | ||
117 | + * just one byte, it still won't overflow the 16 bit count field. */ | ||
118 | static const size_t optimization_level[] = {4096, 8192, 16384, 32768, 65536}; | ||
119 | |||
120 | /* Maximum size in bytes of any multi-element ziplist. | ||
121 | - * Larger values will live in their own isolated ziplists. */ | ||
122 | + * Larger values will live in their own isolated ziplists. | ||
123 | + * This is used only if we're limited by record count. when we're limited by | ||
124 | + * size, the maximum limit is bigger, but still safe. | ||
125 | + * 8k is a recommended / default size limit */ | ||
126 | #define SIZE_SAFETY_LIMIT 8192 | ||
127 | |||
128 | /* Minimum ziplist size in bytes for attempting compression. */ | ||
129 | @@ -444,6 +449,8 @@ REDIS_STATIC int _quicklistNodeAllowInsert(const quicklistNode *node, | ||
130 | unsigned int new_sz = node->sz + sz + ziplist_overhead; | ||
131 | if (likely(_quicklistNodeSizeMeetsOptimizationRequirement(new_sz, fill))) | ||
132 | return 1; | ||
133 | + /* when we return 1 above we know that the limit is a size limit (which is | ||
134 | + * safe, see comments next to optimization_level and SIZE_SAFETY_LIMIT) */ | ||
135 | else if (!sizeMeetsSafetyLimit(new_sz)) | ||
136 | return 0; | ||
137 | else if ((int)node->count < fill) | ||
138 | @@ -463,6 +470,8 @@ REDIS_STATIC int _quicklistNodeAllowMerge(const quicklistNode *a, | ||
139 | unsigned int merge_sz = a->sz + b->sz - 11; | ||
140 | if (likely(_quicklistNodeSizeMeetsOptimizationRequirement(merge_sz, fill))) | ||
141 | return 1; | ||
142 | + /* when we return 1 above we know that the limit is a size limit (which is | ||
143 | + * safe, see comments next to optimization_level and SIZE_SAFETY_LIMIT) */ | ||
144 | else if (!sizeMeetsSafetyLimit(merge_sz)) | ||
145 | return 0; | ||
146 | else if ((int)(a->count + b->count) <= fill) | ||
147 | @@ -482,6 +491,7 @@ REDIS_STATIC int _quicklistNodeAllowMerge(const quicklistNode *a, | ||
148 | * Returns 1 if new head created. */ | ||
149 | int quicklistPushHead(quicklist *quicklist, void *value, size_t sz) { | ||
150 | quicklistNode *orig_head = quicklist->head; | ||
151 | + assert(sz < UINT32_MAX); /* TODO: add support for quicklist nodes that are sds encoded (not zipped) */ | ||
152 | if (likely( | ||
153 | _quicklistNodeAllowInsert(quicklist->head, quicklist->fill, sz))) { | ||
154 | quicklist->head->zl = | ||
155 | @@ -505,6 +515,7 @@ int quicklistPushHead(quicklist *quicklist, void *value, size_t sz) { | ||
156 | * Returns 1 if new tail created. */ | ||
157 | int quicklistPushTail(quicklist *quicklist, void *value, size_t sz) { | ||
158 | quicklistNode *orig_tail = quicklist->tail; | ||
159 | + assert(sz < UINT32_MAX); /* TODO: add support for quicklist nodes that are sds encoded (not zipped) */ | ||
160 | if (likely( | ||
161 | _quicklistNodeAllowInsert(quicklist->tail, quicklist->fill, sz))) { | ||
162 | quicklist->tail->zl = | ||
163 | @@ -847,6 +858,7 @@ REDIS_STATIC void _quicklistInsert(quicklist *quicklist, quicklistEntry *entry, | ||
164 | int fill = quicklist->fill; | ||
165 | quicklistNode *node = entry->node; | ||
166 | quicklistNode *new_node = NULL; | ||
167 | + assert(sz < UINT32_MAX); /* TODO: add support for quicklist nodes that are sds encoded (not zipped) */ | ||
168 | |||
169 | if (!node) { | ||
170 | /* we have no reference node, so let's create only node in the list */ | ||
171 | diff --git a/src/rdb.c b/src/rdb.c | ||
172 | index 53f67a72e..5456c1d80 100644 | ||
173 | --- a/src/rdb.c | ||
174 | +++ b/src/rdb.c | ||
175 | @@ -1625,7 +1625,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) { | ||
176 | } else if (rdbtype == RDB_TYPE_ZSET_2 || rdbtype == RDB_TYPE_ZSET) { | ||
177 | /* Read list/set value. */ | ||
178 | uint64_t zsetlen; | ||
179 | - size_t maxelelen = 0; | ||
180 | + size_t maxelelen = 0, totelelen = 0; | ||
181 | zset *zs; | ||
182 | |||
183 | if ((zsetlen = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL; | ||
184 | @@ -1665,6 +1665,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) { | ||
185 | |||
186 | /* Don't care about integer-encoded strings. */ | ||
187 | if (sdslen(sdsele) > maxelelen) maxelelen = sdslen(sdsele); | ||
188 | + totelelen += sdslen(sdsele); | ||
189 | |||
190 | znode = zslInsert(zs->zsl,score,sdsele); | ||
191 | if (dictAdd(zs->dict,sdsele,&znode->score) != DICT_OK) { | ||
192 | @@ -1677,8 +1678,11 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) { | ||
193 | |||
194 | /* Convert *after* loading, since sorted sets are not stored ordered. */ | ||
195 | if (zsetLength(o) <= server.zset_max_ziplist_entries && | ||
196 | - maxelelen <= server.zset_max_ziplist_value) | ||
197 | - zsetConvert(o,OBJ_ENCODING_ZIPLIST); | ||
198 | + maxelelen <= server.zset_max_ziplist_value && | ||
199 | + ziplistSafeToAdd(NULL, totelelen)) | ||
200 | + { | ||
201 | + zsetConvert(o,OBJ_ENCODING_ZIPLIST); | ||
202 | + } | ||
203 | } else if (rdbtype == RDB_TYPE_HASH) { | ||
204 | uint64_t len; | ||
205 | int ret; | ||
206 | @@ -1731,21 +1735,30 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) { | ||
207 | } | ||
208 | } | ||
209 | |||
210 | - /* Add pair to ziplist */ | ||
211 | - o->ptr = ziplistPush(o->ptr, (unsigned char*)field, | ||
212 | - sdslen(field), ZIPLIST_TAIL); | ||
213 | - o->ptr = ziplistPush(o->ptr, (unsigned char*)value, | ||
214 | - sdslen(value), ZIPLIST_TAIL); | ||
215 | - | ||
216 | /* Convert to hash table if size threshold is exceeded */ | ||
217 | if (sdslen(field) > server.hash_max_ziplist_value || | ||
218 | - sdslen(value) > server.hash_max_ziplist_value) | ||
219 | + sdslen(value) > server.hash_max_ziplist_value || | ||
220 | + !ziplistSafeToAdd(o->ptr, sdslen(field)+sdslen(value))) | ||
221 | { | ||
222 | - sdsfree(field); | ||
223 | - sdsfree(value); | ||
224 | hashTypeConvert(o, OBJ_ENCODING_HT); | ||
225 | + ret = dictAdd((dict*)o->ptr, field, value); | ||
226 | + if (ret == DICT_ERR) { | ||
227 | + rdbReportCorruptRDB("Duplicate hash fields detected"); | ||
228 | + if (dupSearchDict) dictRelease(dupSearchDict); | ||
229 | + sdsfree(value); | ||
230 | + sdsfree(field); | ||
231 | + decrRefCount(o); | ||
232 | + return NULL; | ||
233 | + } | ||
234 | break; | ||
235 | } | ||
236 | + | ||
237 | + /* Add pair to ziplist */ | ||
238 | + o->ptr = ziplistPush(o->ptr, (unsigned char*)field, | ||
239 | + sdslen(field), ZIPLIST_TAIL); | ||
240 | + o->ptr = ziplistPush(o->ptr, (unsigned char*)value, | ||
241 | + sdslen(value), ZIPLIST_TAIL); | ||
242 | + | ||
243 | sdsfree(field); | ||
244 | sdsfree(value); | ||
245 | } | ||
246 | @@ -1858,12 +1871,11 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) { | ||
247 | while ((zi = zipmapNext(zi, &fstr, &flen, &vstr, &vlen)) != NULL) { | ||
248 | if (flen > maxlen) maxlen = flen; | ||
249 | if (vlen > maxlen) maxlen = vlen; | ||
250 | - zl = ziplistPush(zl, fstr, flen, ZIPLIST_TAIL); | ||
251 | - zl = ziplistPush(zl, vstr, vlen, ZIPLIST_TAIL); | ||
252 | |||
253 | /* search for duplicate records */ | ||
254 | sds field = sdstrynewlen(fstr, flen); | ||
255 | - if (!field || dictAdd(dupSearchDict, field, NULL) != DICT_OK) { | ||
256 | + if (!field || dictAdd(dupSearchDict, field, NULL) != DICT_OK || | ||
257 | + !ziplistSafeToAdd(zl, (size_t)flen + vlen)) { | ||
258 | rdbReportCorruptRDB("Hash zipmap with dup elements, or big length (%u)", flen); | ||
259 | dictRelease(dupSearchDict); | ||
260 | sdsfree(field); | ||
261 | @@ -1872,6 +1884,9 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) { | ||
262 | decrRefCount(o); | ||
263 | return NULL; | ||
264 | } | ||
265 | + | ||
266 | + zl = ziplistPush(zl, fstr, flen, ZIPLIST_TAIL); | ||
267 | + zl = ziplistPush(zl, vstr, vlen, ZIPLIST_TAIL); | ||
268 | } | ||
269 | |||
270 | dictRelease(dupSearchDict); | ||
271 | diff --git a/src/server.h b/src/server.h | ||
272 | index d9fef9552..07b34c743 100644 | ||
273 | --- a/src/server.h | ||
274 | +++ b/src/server.h | ||
275 | @@ -2173,7 +2173,7 @@ unsigned char *zzlFirstInRange(unsigned char *zl, zrangespec *range); | ||
276 | unsigned char *zzlLastInRange(unsigned char *zl, zrangespec *range); | ||
277 | unsigned long zsetLength(const robj *zobj); | ||
278 | void zsetConvert(robj *zobj, int encoding); | ||
279 | -void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen); | ||
280 | +void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen, size_t totelelen); | ||
281 | int zsetScore(robj *zobj, sds member, double *score); | ||
282 | unsigned long zslGetRank(zskiplist *zsl, double score, sds o); | ||
283 | int zsetAdd(robj *zobj, double score, sds ele, int in_flags, int *out_flags, double *newscore); | ||
284 | diff --git a/src/t_hash.c b/src/t_hash.c | ||
285 | index ea0606fb0..2720fdbc7 100644 | ||
286 | --- a/src/t_hash.c | ||
287 | +++ b/src/t_hash.c | ||
288 | @@ -39,17 +39,22 @@ | ||
289 | * as their string length can be queried in constant time. */ | ||
290 | void hashTypeTryConversion(robj *o, robj **argv, int start, int end) { | ||
291 | int i; | ||
292 | + size_t sum = 0; | ||
293 | |||
294 | if (o->encoding != OBJ_ENCODING_ZIPLIST) return; | ||
295 | |||
296 | for (i = start; i <= end; i++) { | ||
297 | - if (sdsEncodedObject(argv[i]) && | ||
298 | - sdslen(argv[i]->ptr) > server.hash_max_ziplist_value) | ||
299 | - { | ||
300 | + if (!sdsEncodedObject(argv[i])) | ||
301 | + continue; | ||
302 | + size_t len = sdslen(argv[i]->ptr); | ||
303 | + if (len > server.hash_max_ziplist_value) { | ||
304 | hashTypeConvert(o, OBJ_ENCODING_HT); | ||
305 | - break; | ||
306 | + return; | ||
307 | } | ||
308 | + sum += len; | ||
309 | } | ||
310 | + if (!ziplistSafeToAdd(o->ptr, sum)) | ||
311 | + hashTypeConvert(o, OBJ_ENCODING_HT); | ||
312 | } | ||
313 | |||
314 | /* Get the value from a ziplist encoded hash, identified by field. | ||
315 | diff --git a/src/t_list.c b/src/t_list.c | ||
316 | index f8ca27458..66c9e3c9d 100644 | ||
317 | --- a/src/t_list.c | ||
318 | +++ b/src/t_list.c | ||
319 | @@ -29,6 +29,8 @@ | ||
320 | |||
321 | #include "server.h" | ||
322 | |||
323 | +#define LIST_MAX_ITEM_SIZE ((1ull<<32)-1024) | ||
324 | + | ||
325 | /*----------------------------------------------------------------------------- | ||
326 | * List API | ||
327 | *----------------------------------------------------------------------------*/ | ||
328 | @@ -224,6 +226,13 @@ robj *listTypeDup(robj *o) { | ||
329 | void pushGenericCommand(client *c, int where, int xx) { | ||
330 | int j; | ||
331 | |||
332 | + for (j = 2; j < c->argc; j++) { | ||
333 | + if (sdslen(c->argv[j]->ptr) > LIST_MAX_ITEM_SIZE) { | ||
334 | + addReplyError(c, "Element too large"); | ||
335 | + return; | ||
336 | + } | ||
337 | + } | ||
338 | + | ||
339 | robj *lobj = lookupKeyWrite(c->db, c->argv[1]); | ||
340 | if (checkType(c,lobj,OBJ_LIST)) return; | ||
341 | if (!lobj) { | ||
342 | @@ -287,6 +296,11 @@ void linsertCommand(client *c) { | ||
343 | return; | ||
344 | } | ||
345 | |||
346 | + if (sdslen(c->argv[4]->ptr) > LIST_MAX_ITEM_SIZE) { | ||
347 | + addReplyError(c, "Element too large"); | ||
348 | + return; | ||
349 | + } | ||
350 | + | ||
351 | if ((subject = lookupKeyWriteOrReply(c,c->argv[1],shared.czero)) == NULL || | ||
352 | checkType(c,subject,OBJ_LIST)) return; | ||
353 | |||
354 | @@ -354,6 +368,11 @@ void lsetCommand(client *c) { | ||
355 | long index; | ||
356 | robj *value = c->argv[3]; | ||
357 | |||
358 | + if (sdslen(value->ptr) > LIST_MAX_ITEM_SIZE) { | ||
359 | + addReplyError(c, "Element too large"); | ||
360 | + return; | ||
361 | + } | ||
362 | + | ||
363 | if ((getLongFromObjectOrReply(c, c->argv[2], &index, NULL) != C_OK)) | ||
364 | return; | ||
365 | |||
366 | @@ -576,6 +595,11 @@ void lposCommand(client *c) { | ||
367 | int direction = LIST_TAIL; | ||
368 | long rank = 1, count = -1, maxlen = 0; /* Count -1: option not given. */ | ||
369 | |||
370 | + if (sdslen(ele->ptr) > LIST_MAX_ITEM_SIZE) { | ||
371 | + addReplyError(c, "Element too large"); | ||
372 | + return; | ||
373 | + } | ||
374 | + | ||
375 | /* Parse the optional arguments. */ | ||
376 | for (int j = 3; j < c->argc; j++) { | ||
377 | char *opt = c->argv[j]->ptr; | ||
378 | @@ -671,6 +695,11 @@ void lremCommand(client *c) { | ||
379 | long toremove; | ||
380 | long removed = 0; | ||
381 | |||
382 | + if (sdslen(obj->ptr) > LIST_MAX_ITEM_SIZE) { | ||
383 | + addReplyError(c, "Element too large"); | ||
384 | + return; | ||
385 | + } | ||
386 | + | ||
387 | if ((getLongFromObjectOrReply(c, c->argv[2], &toremove, NULL) != C_OK)) | ||
388 | return; | ||
389 | |||
390 | diff --git a/src/t_stream.c b/src/t_stream.c | ||
391 | index 2c30faa06..574195ee3 100644 | ||
392 | --- a/src/t_stream.c | ||
393 | +++ b/src/t_stream.c | ||
394 | @@ -47,6 +47,12 @@ | ||
395 | * setting stream_node_max_bytes to a huge number. */ | ||
396 | #define STREAM_LISTPACK_MAX_PRE_ALLOCATE 4096 | ||
397 | |||
398 | +/* Don't let listpacks grow too big, even if the user config allows it. | ||
399 | + * doing so can lead to an overflow (trying to store more than 32bit length | ||
400 | + * into the listpack header), or actually an assertion since lpInsert | ||
401 | + * will return NULL. */ | ||
402 | +#define STREAM_LISTPACK_MAX_SIZE (1<<30) | ||
403 | + | ||
404 | void streamFreeCG(streamCG *cg); | ||
405 | void streamFreeNACK(streamNACK *na); | ||
406 | size_t streamReplyWithRangeFromConsumerPEL(client *c, stream *s, streamID *start, streamID *end, size_t count, streamConsumer *consumer); | ||
407 | @@ -433,8 +439,11 @@ void streamGetEdgeID(stream *s, int first, streamID *edge_id) | ||
408 | * | ||
409 | * The function returns C_OK if the item was added, this is always true | ||
410 | * if the ID was generated by the function. However the function may return | ||
411 | - * C_ERR if an ID was given via 'use_id', but adding it failed since the | ||
412 | - * current top ID is greater or equal. */ | ||
413 | + * C_ERR in several cases: | ||
414 | + * 1. If an ID was given via 'use_id', but adding it failed since the | ||
415 | + * current top ID is greater or equal. errno will be set to EDOM. | ||
416 | + * 2. If a size of a single element or the sum of the elements is too big to | ||
417 | + * be stored into the stream. errno will be set to ERANGE. */ | ||
418 | int streamAppendItem(stream *s, robj **argv, int64_t numfields, streamID *added_id, streamID *use_id) { | ||
419 | |||
420 | /* Generate the new entry ID. */ | ||
421 | @@ -448,7 +457,23 @@ int streamAppendItem(stream *s, robj **argv, int64_t numfields, streamID *added_ | ||
422 | * or return an error. Automatically generated IDs might | ||
423 | * overflow (and wrap-around) when incrementing the sequence | ||
424 | part. */ | ||
425 | - if (streamCompareID(&id,&s->last_id) <= 0) return C_ERR; | ||
426 | + if (streamCompareID(&id,&s->last_id) <= 0) { | ||
427 | + errno = EDOM; | ||
428 | + return C_ERR; | ||
429 | + } | ||
430 | + | ||
431 | + /* Avoid overflow when trying to add an element to the stream (listpack | ||
432 | + * can only host up to 32bit length sttrings, and also a total listpack size | ||
433 | + * can't be bigger than 32bit length. */ | ||
434 | + size_t totelelen = 0; | ||
435 | + for (int64_t i = 0; i < numfields*2; i++) { | ||
436 | + sds ele = argv[i]->ptr; | ||
437 | + totelelen += sdslen(ele); | ||
438 | + } | ||
439 | + if (totelelen > STREAM_LISTPACK_MAX_SIZE) { | ||
440 | + errno = ERANGE; | ||
441 | + return C_ERR; | ||
442 | + } | ||
443 | |||
444 | /* Add the new entry. */ | ||
445 | raxIterator ri; | ||
446 | @@ -507,9 +532,10 @@ int streamAppendItem(stream *s, robj **argv, int64_t numfields, streamID *added_ | ||
447 | * if we need to switch to the next one. 'lp' will be set to NULL if | ||
448 | * the current node is full. */ | ||
449 | if (lp != NULL) { | ||
450 | - if (server.stream_node_max_bytes && | ||
451 | - lp_bytes >= server.stream_node_max_bytes) | ||
452 | - { | ||
453 | + size_t node_max_bytes = server.stream_node_max_bytes; | ||
454 | + if (node_max_bytes == 0 || node_max_bytes > STREAM_LISTPACK_MAX_SIZE) | ||
455 | + node_max_bytes = STREAM_LISTPACK_MAX_SIZE; | ||
456 | + if (lp_bytes + totelelen >= node_max_bytes) { | ||
457 | lp = NULL; | ||
458 | } else if (server.stream_node_max_entries) { | ||
459 | unsigned char *lp_ele = lpFirst(lp); | ||
460 | @@ -1796,11 +1822,13 @@ void xaddCommand(client *c) { | ||
461 | /* Append using the low level function and return the ID. */ | ||
462 | streamID id; | ||
463 | if (streamAppendItem(s,c->argv+field_pos,(c->argc-field_pos)/2, | ||
464 | - &id, parsed_args.id_given ? &parsed_args.id : NULL) | ||
465 | - == C_ERR) | ||
466 | + &id, parsed_args.id_given ? &parsed_args.id : NULL) == C_ERR) | ||
467 | { | ||
468 | - addReplyError(c,"The ID specified in XADD is equal or smaller than the " | ||
469 | - "target stream top item"); | ||
470 | + if (errno == EDOM) | ||
471 | + addReplyError(c,"The ID specified in XADD is equal or smaller than " | ||
472 | + "the target stream top item"); | ||
473 | + else | ||
474 | + addReplyError(c,"Elements are too large to be stored"); | ||
475 | return; | ||
476 | } | ||
477 | addReplyStreamID(c,&id); | ||
478 | diff --git a/src/t_zset.c b/src/t_zset.c | ||
479 | index 3b9ebd2bd..2abc1b49b 100644 | ||
480 | --- a/src/t_zset.c | ||
481 | +++ b/src/t_zset.c | ||
482 | @@ -1242,15 +1242,18 @@ void zsetConvert(robj *zobj, int encoding) { | ||
483 | } | ||
484 | |||
485 | /* Convert the sorted set object into a ziplist if it is not already a ziplist | ||
486 | - * and if the number of elements and the maximum element size is within the | ||
487 | - * expected ranges. */ | ||
488 | -void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen) { | ||
489 | + * and if the number of elements and the maximum element size and total elements size | ||
490 | + * are within the expected ranges. */ | ||
491 | +void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen, size_t totelelen) { | ||
492 | if (zobj->encoding == OBJ_ENCODING_ZIPLIST) return; | ||
493 | zset *zset = zobj->ptr; | ||
494 | |||
495 | if (zset->zsl->length <= server.zset_max_ziplist_entries && | ||
496 | - maxelelen <= server.zset_max_ziplist_value) | ||
497 | - zsetConvert(zobj,OBJ_ENCODING_ZIPLIST); | ||
498 | + maxelelen <= server.zset_max_ziplist_value && | ||
499 | + ziplistSafeToAdd(NULL, totelelen)) | ||
500 | + { | ||
501 | + zsetConvert(zobj,OBJ_ENCODING_ZIPLIST); | ||
502 | + } | ||
503 | } | ||
504 | |||
505 | /* Return (by reference) the score of the specified member of the sorted set | ||
506 | @@ -1370,20 +1373,28 @@ int zsetAdd(robj *zobj, double score, sds ele, int in_flags, int *out_flags, dou | ||
507 | } | ||
508 | return 1; | ||
509 | } else if (!xx) { | ||
510 | - /* Optimize: check if the element is too large or the list | ||
511 | + /* check if the element is too large or the list | ||
512 | * becomes too long *before* executing zzlInsert. */ | ||
513 | - zobj->ptr = zzlInsert(zobj->ptr,ele,score); | ||
514 | - if (zzlLength(zobj->ptr) > server.zset_max_ziplist_entries || | ||
515 | - sdslen(ele) > server.zset_max_ziplist_value) | ||
516 | + if (zzlLength(zobj->ptr)+1 > server.zset_max_ziplist_entries || | ||
517 | + sdslen(ele) > server.zset_max_ziplist_value || | ||
518 | + !ziplistSafeToAdd(zobj->ptr, sdslen(ele))) | ||
519 | + { | ||
520 | zsetConvert(zobj,OBJ_ENCODING_SKIPLIST); | ||
521 | - if (newscore) *newscore = score; | ||
522 | - *out_flags |= ZADD_OUT_ADDED; | ||
523 | - return 1; | ||
524 | + } else { | ||
525 | + zobj->ptr = zzlInsert(zobj->ptr,ele,score); | ||
526 | + if (newscore) *newscore = score; | ||
527 | + *out_flags |= ZADD_OUT_ADDED; | ||
528 | + return 1; | ||
529 | + } | ||
530 | } else { | ||
531 | *out_flags |= ZADD_OUT_NOP; | ||
532 | return 1; | ||
533 | } | ||
534 | - } else if (zobj->encoding == OBJ_ENCODING_SKIPLIST) { | ||
535 | + } | ||
536 | + | ||
537 | + /* Note that the above block handling ziplist would have either returned or | ||
538 | + * converted the key to skiplist. */ | ||
539 | + if (zobj->encoding == OBJ_ENCODING_SKIPLIST) { | ||
540 | zset *zs = zobj->ptr; | ||
541 | zskiplistNode *znode; | ||
542 | dictEntry *de; | ||
543 | @@ -2361,7 +2372,7 @@ inline static void zunionInterAggregate(double *target, double val, int aggregat | ||
544 | } | ||
545 | } | ||
546 | |||
547 | -static int zsetDictGetMaxElementLength(dict *d) { | ||
548 | +static size_t zsetDictGetMaxElementLength(dict *d, size_t *totallen) { | ||
549 | dictIterator *di; | ||
550 | dictEntry *de; | ||
551 | size_t maxelelen = 0; | ||
552 | @@ -2371,6 +2382,8 @@ static int zsetDictGetMaxElementLength(dict *d) { | ||
553 | while((de = dictNext(di)) != NULL) { | ||
554 | sds ele = dictGetKey(de); | ||
555 | if (sdslen(ele) > maxelelen) maxelelen = sdslen(ele); | ||
556 | + if (totallen) | ||
557 | + (*totallen) += sdslen(ele); | ||
558 | } | ||
559 | |||
560 | dictReleaseIterator(di); | ||
561 | @@ -2378,7 +2391,7 @@ static int zsetDictGetMaxElementLength(dict *d) { | ||
562 | return maxelelen; | ||
563 | } | ||
564 | |||
565 | -static void zdiffAlgorithm1(zsetopsrc *src, long setnum, zset *dstzset, size_t *maxelelen) { | ||
566 | +static void zdiffAlgorithm1(zsetopsrc *src, long setnum, zset *dstzset, size_t *maxelelen, size_t *totelelen) { | ||
567 | /* DIFF Algorithm 1: | ||
568 | * | ||
569 | * We perform the diff by iterating all the elements of the first set, | ||
570 | @@ -2426,13 +2439,14 @@ static void zdiffAlgorithm1(zsetopsrc *src, long setnum, zset *dstzset, size_t * | ||
571 | znode = zslInsert(dstzset->zsl,zval.score,tmp); | ||
572 | dictAdd(dstzset->dict,tmp,&znode->score); | ||
573 | if (sdslen(tmp) > *maxelelen) *maxelelen = sdslen(tmp); | ||
574 | + (*totelelen) += sdslen(tmp); | ||
575 | } | ||
576 | } | ||
577 | zuiClearIterator(&src[0]); | ||
578 | } | ||
579 | |||
580 | |||
581 | -static void zdiffAlgorithm2(zsetopsrc *src, long setnum, zset *dstzset, size_t *maxelelen) { | ||
582 | +static void zdiffAlgorithm2(zsetopsrc *src, long setnum, zset *dstzset, size_t *maxelelen, size_t *totelelen) { | ||
583 | /* DIFF Algorithm 2: | ||
584 | * | ||
585 | * Add all the elements of the first set to the auxiliary set. | ||
586 | @@ -2486,7 +2500,7 @@ static void zdiffAlgorithm2(zsetopsrc *src, long setnum, zset *dstzset, size_t * | ||
587 | |||
588 | /* Using this algorithm, we can't calculate the max element as we go, | ||
589 | * we have to iterate through all elements to find the max one after. */ | ||
590 | - *maxelelen = zsetDictGetMaxElementLength(dstzset->dict); | ||
591 | + *maxelelen = zsetDictGetMaxElementLength(dstzset->dict, totelelen); | ||
592 | } | ||
593 | |||
594 | static int zsetChooseDiffAlgorithm(zsetopsrc *src, long setnum) { | ||
595 | @@ -2523,14 +2537,14 @@ static int zsetChooseDiffAlgorithm(zsetopsrc *src, long setnum) { | ||
596 | return (algo_one_work <= algo_two_work) ? 1 : 2; | ||
597 | } | ||
598 | |||
599 | -static void zdiff(zsetopsrc *src, long setnum, zset *dstzset, size_t *maxelelen) { | ||
600 | +static void zdiff(zsetopsrc *src, long setnum, zset *dstzset, size_t *maxelelen, size_t *totelelen) { | ||
601 | /* Skip everything if the smallest input is empty. */ | ||
602 | if (zuiLength(&src[0]) > 0) { | ||
603 | int diff_algo = zsetChooseDiffAlgorithm(src, setnum); | ||
604 | if (diff_algo == 1) { | ||
605 | - zdiffAlgorithm1(src, setnum, dstzset, maxelelen); | ||
606 | + zdiffAlgorithm1(src, setnum, dstzset, maxelelen, totelelen); | ||
607 | } else if (diff_algo == 2) { | ||
608 | - zdiffAlgorithm2(src, setnum, dstzset, maxelelen); | ||
609 | + zdiffAlgorithm2(src, setnum, dstzset, maxelelen, totelelen); | ||
610 | } else if (diff_algo != 0) { | ||
611 | serverPanic("Unknown algorithm"); | ||
612 | } | ||
613 | @@ -2565,7 +2579,7 @@ void zunionInterDiffGenericCommand(client *c, robj *dstkey, int numkeysIndex, in | ||
614 | zsetopsrc *src; | ||
615 | zsetopval zval; | ||
616 | sds tmp; | ||
617 | - size_t maxelelen = 0; | ||
618 | + size_t maxelelen = 0, totelelen = 0; | ||
619 | robj *dstobj; | ||
620 | zset *dstzset; | ||
621 | zskiplistNode *znode; | ||
622 | @@ -2701,6 +2715,7 @@ void zunionInterDiffGenericCommand(client *c, robj *dstkey, int numkeysIndex, in | ||
623 | tmp = zuiNewSdsFromValue(&zval); | ||
624 | znode = zslInsert(dstzset->zsl,score,tmp); | ||
625 | dictAdd(dstzset->dict,tmp,&znode->score); | ||
626 | + totelelen += sdslen(tmp); | ||
627 | if (sdslen(tmp) > maxelelen) maxelelen = sdslen(tmp); | ||
628 | } | ||
629 | } | ||
630 | @@ -2737,6 +2752,7 @@ void zunionInterDiffGenericCommand(client *c, robj *dstkey, int numkeysIndex, in | ||
631 | /* Remember the longest single element encountered, | ||
632 | * to understand if it's possible to convert to ziplist | ||
633 | * at the end. */ | ||
634 | + totelelen += sdslen(tmp); | ||
635 | if (sdslen(tmp) > maxelelen) maxelelen = sdslen(tmp); | ||
636 | /* Update the element with its initial score. */ | ||
637 | dictSetKey(accumulator, de, tmp); | ||
638 | @@ -2771,14 +2787,14 @@ void zunionInterDiffGenericCommand(client *c, robj *dstkey, int numkeysIndex, in | ||
639 | dictReleaseIterator(di); | ||
640 | dictRelease(accumulator); | ||
641 | } else if (op == SET_OP_DIFF) { | ||
642 | - zdiff(src, setnum, dstzset, &maxelelen); | ||
643 | + zdiff(src, setnum, dstzset, &maxelelen, &totelelen); | ||
644 | } else { | ||
645 | serverPanic("Unknown operator"); | ||
646 | } | ||
647 | |||
648 | if (dstkey) { | ||
649 | if (dstzset->zsl->length) { | ||
650 | - zsetConvertToZiplistIfNeeded(dstobj, maxelelen); | ||
651 | + zsetConvertToZiplistIfNeeded(dstobj, maxelelen, totelelen); | ||
652 | setKey(c, c->db, dstkey, dstobj); | ||
653 | addReplyLongLong(c, zsetLength(dstobj)); | ||
654 | notifyKeyspaceEvent(NOTIFY_ZSET, | ||
655 | diff --git a/src/ziplist.c b/src/ziplist.c | ||
656 | index aae86c1f2..fdc1bb9e1 100644 | ||
657 | --- a/src/ziplist.c | ||
658 | +++ b/src/ziplist.c | ||
659 | @@ -267,6 +267,17 @@ | ||
660 | ZIPLIST_LENGTH(zl) = intrev16ifbe(intrev16ifbe(ZIPLIST_LENGTH(zl))+incr); \ | ||
661 | } | ||
662 | |||
663 | +/* Don't let ziplists grow over 1GB in any case, don't wanna risk overflow in | ||
664 | + * zlbytes*/ | ||
665 | +#define ZIPLIST_MAX_SAFETY_SIZE (1<<30) | ||
666 | +int ziplistSafeToAdd(unsigned char* zl, size_t add) { | ||
667 | + size_t len = zl? ziplistBlobLen(zl): 0; | ||
668 | + if (len + add > ZIPLIST_MAX_SAFETY_SIZE) | ||
669 | + return 0; | ||
670 | + return 1; | ||
671 | +} | ||
672 | + | ||
673 | + | ||
674 | /* We use this function to receive information about a ziplist entry. | ||
675 | * Note that this is not how the data is actually encoded, is just what we | ||
676 | * get filled by a function in order to operate more easily. */ | ||
677 | @@ -709,7 +720,8 @@ unsigned char *ziplistNew(void) { | ||
678 | } | ||
679 | |||
680 | /* Resize the ziplist. */ | ||
681 | -unsigned char *ziplistResize(unsigned char *zl, unsigned int len) { | ||
682 | +unsigned char *ziplistResize(unsigned char *zl, size_t len) { | ||
683 | + assert(len < UINT32_MAX); | ||
684 | zl = zrealloc(zl,len); | ||
685 | ZIPLIST_BYTES(zl) = intrev32ifbe(len); | ||
686 | zl[len-1] = ZIP_END; | ||
687 | @@ -1070,6 +1082,9 @@ unsigned char *ziplistMerge(unsigned char **first, unsigned char **second) { | ||
688 | /* Combined zl length should be limited within UINT16_MAX */ | ||
689 | zllength = zllength < UINT16_MAX ? zllength : UINT16_MAX; | ||
690 | |||
691 | + /* larger values can't be stored into ZIPLIST_BYTES */ | ||
692 | + assert(zlbytes < UINT32_MAX); | ||
693 | + | ||
694 | /* Save offset positions before we start ripping memory apart. */ | ||
695 | size_t first_offset = intrev32ifbe(ZIPLIST_TAIL_OFFSET(*first)); | ||
696 | size_t second_offset = intrev32ifbe(ZIPLIST_TAIL_OFFSET(*second)); | ||
697 | diff --git a/src/ziplist.h b/src/ziplist.h | ||
698 | index 9e7997ad8..569e1259d 100644 | ||
699 | --- a/src/ziplist.h | ||
700 | +++ b/src/ziplist.h | ||
701 | @@ -65,6 +65,7 @@ int ziplistValidateIntegrity(unsigned char *zl, size_t size, int deep, | ||
702 | void ziplistRandomPair(unsigned char *zl, unsigned long total_count, ziplistEntry *key, ziplistEntry *val); | ||
703 | void ziplistRandomPairs(unsigned char *zl, unsigned int count, ziplistEntry *keys, ziplistEntry *vals); | ||
704 | unsigned int ziplistRandomPairsUnique(unsigned char *zl, unsigned int count, ziplistEntry *keys, ziplistEntry *vals); | ||
705 | +int ziplistSafeToAdd(unsigned char* zl, size_t add); | ||
706 | |||
707 | #ifdef REDIS_TEST | ||
708 | int ziplistTest(int argc, char *argv[], int accurate); | ||
709 | diff --git a/tests/unit/violations.tcl b/tests/unit/violations.tcl | ||
710 | new file mode 100644 | ||
711 | index 000000000..1d3140c52 | ||
712 | --- /dev/null | ||
713 | +++ b/tests/unit/violations.tcl | ||
714 | @@ -0,0 +1,156 @@ | ||
715 | +# These tests consume massive amounts of memory, and are not | ||
716 | +# suitable to be executed as part of the normal test suite | ||
717 | +set ::str500 [string repeat x 500000000] ;# 500mb | ||
718 | + | ||
719 | +# Utility function to write big argument into redis client connection | ||
720 | +proc write_big_bulk {size} { | ||
721 | + r write "\$$size\r\n" | ||
722 | + while {$size >= 500000000} { | ||
723 | + r write $::str500 | ||
724 | + incr size -500000000 | ||
725 | + } | ||
726 | + if {$size > 0} { | ||
727 | + r write [string repeat x $size] | ||
728 | + } | ||
729 | + r write "\r\n" | ||
730 | +} | ||
731 | + | ||
732 | +# One XADD with one huge 5GB field | ||
733 | +# Expected to fail resulting in an empty stream | ||
734 | +start_server [list overrides [list save ""] ] { | ||
735 | + test {XADD one huge field} { | ||
736 | + r config set proto-max-bulk-len 10000000000 ;#10gb | ||
737 | + r config set client-query-buffer-limit 10000000000 ;#10gb | ||
738 | + r write "*5\r\n\$4\r\nXADD\r\n\$2\r\nS1\r\n\$1\r\n*\r\n" | ||
739 | + r write "\$1\r\nA\r\n" | ||
740 | + write_big_bulk 5000000000 ;#5gb | ||
741 | + r flush | ||
742 | + catch {r read} err | ||
743 | + assert_match {*too large*} $err | ||
744 | + r xlen S1 | ||
745 | + } {0} | ||
746 | +} | ||
747 | + | ||
748 | +# One XADD with one huge (exactly nearly) 4GB field | ||
749 | +# This uncovers the overflow in lpEncodeGetType | ||
750 | +# Expected to fail resulting in an empty stream | ||
751 | +start_server [list overrides [list save ""] ] { | ||
752 | + test {XADD one huge field - 1} { | ||
753 | + r config set proto-max-bulk-len 10000000000 ;#10gb | ||
754 | + r config set client-query-buffer-limit 10000000000 ;#10gb | ||
755 | + r write "*5\r\n\$4\r\nXADD\r\n\$2\r\nS1\r\n\$1\r\n*\r\n" | ||
756 | + r write "\$1\r\nA\r\n" | ||
757 | + write_big_bulk 4294967295 ;#4gb-1 | ||
758 | + r flush | ||
759 | + catch {r read} err | ||
760 | + assert_match {*too large*} $err | ||
761 | + r xlen S1 | ||
762 | + } {0} | ||
763 | +} | ||
764 | + | ||
765 | +# Gradually add big stream fields using repeated XADD calls | ||
766 | +start_server [list overrides [list save ""] ] { | ||
767 | + test {several XADD big fields} { | ||
768 | + r config set stream-node-max-bytes 0 | ||
769 | + for {set j 0} {$j<10} {incr j} { | ||
770 | + r xadd stream * 1 $::str500 2 $::str500 | ||
771 | + } | ||
772 | + r ping | ||
773 | + r xlen stream | ||
774 | + } {10} | ||
775 | +} | ||
776 | + | ||
777 | +# Add over 4GB to a single stream listpack (one XADD command) | ||
778 | +# Expected to fail resulting in an empty stream | ||
779 | +start_server [list overrides [list save ""] ] { | ||
780 | + test {single XADD big fields} { | ||
781 | + r write "*23\r\n\$4\r\nXADD\r\n\$1\r\nS\r\n\$1\r\n*\r\n" | ||
782 | + for {set j 0} {$j<10} {incr j} { | ||
783 | + r write "\$1\r\n$j\r\n" | ||
784 | + write_big_bulk 500000000 ;#500mb | ||
785 | + } | ||
786 | + r flush | ||
787 | + catch {r read} err | ||
788 | + assert_match {*too large*} $err | ||
789 | + r xlen S | ||
790 | + } {0} | ||
791 | +} | ||
792 | + | ||
793 | +# Gradually add big hash fields using repeated HSET calls | ||
794 | +# This reproduces the overflow in the call to ziplistResize | ||
795 | +# Object will be converted to hashtable encoding | ||
796 | +start_server [list overrides [list save ""] ] { | ||
797 | + r config set hash-max-ziplist-value 1000000000 ;#1gb | ||
798 | + test {hash with many big fields} { | ||
799 | + for {set j 0} {$j<10} {incr j} { | ||
800 | + r hset h $j $::str500 | ||
801 | + } | ||
802 | + r object encoding h | ||
803 | + } {hashtable} | ||
804 | +} | ||
805 | + | ||
806 | +# Add over 4GB to a single hash field (one HSET command) | ||
807 | +# Object will be converted to hashtable encoding | ||
808 | +start_server [list overrides [list save ""] ] { | ||
809 | + test {hash with one huge field} { | ||
810 | + catch {r config set hash-max-ziplist-value 10000000000} ;#10gb | ||
811 | + r config set proto-max-bulk-len 10000000000 ;#10gb | ||
812 | + r config set client-query-buffer-limit 10000000000 ;#10gb | ||
813 | + r write "*4\r\n\$4\r\nHSET\r\n\$2\r\nH1\r\n" | ||
814 | + r write "\$1\r\nA\r\n" | ||
815 | + write_big_bulk 5000000000 ;#5gb | ||
816 | + r flush | ||
817 | + r read | ||
818 | + r object encoding H1 | ||
819 | + } {hashtable} | ||
820 | +} | ||
821 | + | ||
822 | +# Add over 4GB to a single list member (one LPUSH command) | ||
823 | +# Currently unsupported, and expected to fail rather than being truncated | ||
824 | +# Expected to fail resulting in a non-existing list | ||
825 | +start_server [list overrides [list save ""] ] { | ||
826 | + test {list with one huge field} { | ||
827 | + r config set proto-max-bulk-len 10000000000 ;#10gb | ||
828 | + r config set client-query-buffer-limit 10000000000 ;#10gb | ||
829 | + r write "*3\r\n\$5\r\nLPUSH\r\n\$2\r\nL1\r\n" | ||
830 | + write_big_bulk 5000000000 ;#5gb | ||
831 | + r flush | ||
832 | + catch {r read} err | ||
833 | + assert_match {*too large*} $err | ||
834 | + r exists L1 | ||
835 | + } {0} | ||
836 | +} | ||
837 | + | ||
838 | +# SORT which attempts to store an element larger than 4GB into a list. | ||
839 | +# Currently unsupported and results in an assertion instead of truncation | ||
840 | +start_server [list overrides [list save ""] ] { | ||
841 | + test {SORT adds huge field to list} { | ||
842 | + r config set proto-max-bulk-len 10000000000 ;#10gb | ||
843 | + r config set client-query-buffer-limit 10000000000 ;#10gb | ||
844 | + r write "*3\r\n\$3\r\nSET\r\n\$2\r\nS1\r\n" | ||
845 | + write_big_bulk 5000000000 ;#5gb | ||
846 | + r flush | ||
847 | + r read | ||
848 | + assert_equal [r strlen S1] 5000000000 | ||
849 | + r set S2 asdf | ||
850 | + r sadd myset 1 2 | ||
851 | + r mset D1 1 D2 2 | ||
852 | + catch {r sort myset by D* get S* store mylist} | ||
853 | + assert_equal [count_log_message 0 "crashed by signal"] 0 | ||
854 | + assert_equal [count_log_message 0 "ASSERTION FAILED"] 1 | ||
855 | + } | ||
856 | +} | ||
857 | + | ||
858 | +# SORT which stores an integer encoded element into a list. | ||
859 | +# Just for coverage, no news here. | ||
860 | +start_server [list overrides [list save ""] ] { | ||
861 | + test {SORT adds integer field to list} { | ||
862 | + r set S1 asdf | ||
863 | + r set S2 123 ;# integer encoded | ||
864 | + assert_encoding "int" S2 | ||
865 | + r sadd myset 1 2 | ||
866 | + r mset D1 1 D2 2 | ||
867 | + r sort myset by D* get S* store mylist | ||
868 | + r llen mylist | ||
869 | + } {2} | ||
870 | +} | ||
871 | -- | ||
872 | 2.17.1 | ||
873 | |||
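The ziplist/listpack patch above repeatedly applies one idea: check the would-be size before inserting, and refuse (or convert encodings) once a 1GB cap would be exceeded, so 32-bit length headers can never overflow. The sketch below mirrors that check-before-grow pattern in isolation; blob_safe_to_add and BLOB_MAX_SAFETY_SIZE are illustrative stand-ins for ziplistSafeToAdd() and its constant, not Redis API.

```c
/* Hedged sketch of the check-before-grow guard added to ziplist/listpack. */
#include <stdio.h>
#include <stddef.h>

#define BLOB_MAX_SAFETY_SIZE ((size_t)1 << 30)   /* 1GB cap, as in the patch */

static int blob_safe_to_add(size_t current_bytes, size_t add_bytes) {
    /* The cap keeps totals far below UINT32_MAX, where a 32-bit size header
     * in the serialized blob would otherwise wrap. */
    return current_bytes + add_bytes <= BLOB_MAX_SAFETY_SIZE;
}

int main(void) {
    size_t used = 900u * 1024 * 1024;   /* 900MB already stored */
    size_t add  = 200u * 1024 * 1024;   /* a 200MB element to append */
    if (!blob_safe_to_add(used, add))
        puts("reject or convert encoding: append would exceed the 1GB cap");
    else
        puts("append is safe");
    return 0;
}
```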
diff --git a/meta-oe/recipes-extended/redis/redis/CVE-2021-32675.patch b/meta-oe/recipes-extended/redis/redis/CVE-2021-32675.patch
new file mode 100644
index 0000000000..ab691612a9
--- /dev/null
+++ b/meta-oe/recipes-extended/redis/redis/CVE-2021-32675.patch
@@ -0,0 +1,129 @@
1 | From a71a65e9ed75b347c33bc882b38f4f1006fcba39 Mon Sep 17 00:00:00 2001 | ||
2 | From: Oran Agra <oran@redislabs.com> | ||
3 | Date: Wed, 9 Jun 2021 17:31:39 +0300 | ||
4 | Subject: [PATCH] Prevent unauthenticated client from easily consuming lots of | ||
5 | memory (CVE-2021-32675) | ||
6 | |||
7 | This change sets a low limit for multibulk and bulk length in the | ||
8 | protocol for unauthenticated connections, so that they can't easily | ||
9 | cause redis to allocate massive amounts of memory by sending just a few | ||
10 | characters on the network. | ||
11 | The new limits are 10 arguments of 16kb each (instead of 1m of 512mb) | ||
12 | |||
13 | CVE: CVE-2021-32675 | ||
14 | Upstream-Status: Backport[https://github.com/redis/redis/commit/5674b0057ff2903d43eaff802017eddf37c360f8] | ||
15 | |||
16 | Signed-off-by: Changqing Li <changqing.li@windriver.com> | ||
17 | --- | ||
18 | src/networking.c | 17 +++++++++++++++++ | ||
19 | src/server.c | 11 +++-------- | ||
20 | src/server.h | 1 + | ||
21 | tests/unit/auth.tcl | 16 ++++++++++++++++ | ||
22 | 4 files changed, 37 insertions(+), 8 deletions(-) | ||
23 | |||
24 | diff --git a/src/networking.c b/src/networking.c | ||
25 | index 2355a37..8e891c6 100644 | ||
26 | --- a/src/networking.c | ||
27 | +++ b/src/networking.c | ||
28 | @@ -107,6 +107,15 @@ static void clientSetDefaultAuth(client *c) { | ||
29 | !(c->user->flags & USER_FLAG_DISABLED); | ||
30 | } | ||
31 | |||
32 | +int authRequired(client *c) { | ||
33 | + /* Check if the user is authenticated. This check is skipped in case | ||
34 | + * the default user is flagged as "nopass" and is active. */ | ||
35 | + int auth_required = (!(DefaultUser->flags & USER_FLAG_NOPASS) || | ||
36 | + (DefaultUser->flags & USER_FLAG_DISABLED)) && | ||
37 | + !c->authenticated; | ||
38 | + return auth_required; | ||
39 | +} | ||
40 | + | ||
41 | client *createClient(connection *conn) { | ||
42 | client *c = zmalloc(sizeof(client)); | ||
43 | |||
44 | @@ -1855,6 +1864,10 @@ int processMultibulkBuffer(client *c) { | ||
45 | addReplyError(c,"Protocol error: invalid multibulk length"); | ||
46 | setProtocolError("invalid mbulk count",c); | ||
47 | return C_ERR; | ||
48 | + } else if (ll > 10 && authRequired(c)) { | ||
49 | + addReplyError(c, "Protocol error: unauthenticated multibulk length"); | ||
50 | + setProtocolError("unauth mbulk count", c); | ||
51 | + return C_ERR; | ||
52 | } | ||
53 | |||
54 | c->qb_pos = (newline-c->querybuf)+2; | ||
55 | @@ -1902,6 +1915,10 @@ int processMultibulkBuffer(client *c) { | ||
56 | addReplyError(c,"Protocol error: invalid bulk length"); | ||
57 | setProtocolError("invalid bulk length",c); | ||
58 | return C_ERR; | ||
59 | + } else if (ll > 16384 && authRequired(c)) { | ||
60 | + addReplyError(c, "Protocol error: unauthenticated bulk length"); | ||
61 | + setProtocolError("unauth bulk length", c); | ||
62 | + return C_ERR; | ||
63 | } | ||
64 | |||
65 | c->qb_pos = newline-c->querybuf+2; | ||
66 | diff --git a/src/server.c b/src/server.c | ||
67 | index 9932606..f65ad22 100644 | ||
68 | --- a/src/server.c | ||
69 | +++ b/src/server.c | ||
70 | @@ -3996,14 +3996,9 @@ int processCommand(client *c) { | ||
71 | int is_may_replicate_command = (c->cmd->flags & (CMD_WRITE | CMD_MAY_REPLICATE)) || | ||
72 | (c->cmd->proc == execCommand && (c->mstate.cmd_flags & (CMD_WRITE | CMD_MAY_REPLICATE))); | ||
73 | |||
74 | - /* Check if the user is authenticated. This check is skipped in case | ||
75 | - * the default user is flagged as "nopass" and is active. */ | ||
76 | - int auth_required = (!(DefaultUser->flags & USER_FLAG_NOPASS) || | ||
77 | - (DefaultUser->flags & USER_FLAG_DISABLED)) && | ||
78 | - !c->authenticated; | ||
79 | - if (auth_required) { | ||
80 | - /* AUTH and HELLO and no auth modules are valid even in | ||
81 | - * non-authenticated state. */ | ||
82 | + if (authRequired(c)) { | ||
83 | + /* AUTH and HELLO and no auth commands are valid even in | ||
84 | + * non-authenticated state. */ | ||
85 | if (!(c->cmd->flags & CMD_NO_AUTH)) { | ||
86 | rejectCommand(c,shared.noautherr); | ||
87 | return C_OK; | ||
88 | diff --git a/src/server.h b/src/server.h | ||
89 | index e256ce0..a3dfe60 100644 | ||
90 | --- a/src/server.h | ||
91 | +++ b/src/server.h | ||
92 | @@ -1894,6 +1894,7 @@ void protectClient(client *c); | ||
93 | void unprotectClient(client *c); | ||
94 | void initThreadedIO(void); | ||
95 | client *lookupClientByID(uint64_t id); | ||
96 | +int authRequired(client *c); | ||
97 | |||
98 | #ifdef __GNUC__ | ||
99 | void addReplyErrorFormat(client *c, const char *fmt, ...) | ||
100 | diff --git a/tests/unit/auth.tcl b/tests/unit/auth.tcl | ||
101 | index b63cf01..5997707 100644 | ||
102 | --- a/tests/unit/auth.tcl | ||
103 | +++ b/tests/unit/auth.tcl | ||
104 | @@ -24,6 +24,22 @@ start_server {tags {"auth"} overrides {requirepass foobar}} { | ||
105 | r set foo 100 | ||
106 | r incr foo | ||
107 | } {101} | ||
108 | + | ||
109 | + test {For unauthenticated clients multibulk and bulk length are limited} { | ||
110 | + set rr [redis [srv "host"] [srv "port"] 0 $::tls] | ||
111 | + $rr write "*100\r\n" | ||
112 | + $rr flush | ||
113 | + catch {[$rr read]} e | ||
114 | + assert_match {*unauthenticated multibulk length*} $e | ||
115 | + $rr close | ||
116 | + | ||
117 | + set rr [redis [srv "host"] [srv "port"] 0 $::tls] | ||
118 | + $rr write "*1\r\n\$100000000\r\n" | ||
119 | + $rr flush | ||
120 | + catch {[$rr read]} e | ||
121 | + assert_match {*unauthenticated bulk length*} $e | ||
122 | + $rr close | ||
123 | + } | ||
124 | } | ||
125 | |||
126 | start_server {tags {"auth_binary_password"}} { | ||
127 | -- | ||
128 | 2.17.1 | ||
129 | |||
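The CVE-2021-32675 patch above caps what an unauthenticated connection may request before any memory is reserved: at most 10 multibulk arguments of at most 16KB each. The sketch below shows that limit check in isolation; the struct, constants, and helper names are illustrative assumptions, not the actual Redis networking code.

```c
/* Hedged sketch of the pre-authentication protocol limits. */
#include <stdio.h>

#define UNAUTH_MAX_MULTIBULK 10      /* at most 10 arguments before AUTH */
#define UNAUTH_MAX_BULK      16384   /* at most 16KB per argument before AUTH */

struct conn { int authenticated; };

static int multibulk_len_ok(const struct conn *c, long long ll) {
    if (!c->authenticated && ll > UNAUTH_MAX_MULTIBULK) {
        fprintf(stderr, "Protocol error: unauthenticated multibulk length\n");
        return 0;
    }
    return 1;
}

static int bulk_len_ok(const struct conn *c, long long ll) {
    if (!c->authenticated && ll > UNAUTH_MAX_BULK) {
        fprintf(stderr, "Protocol error: unauthenticated bulk length\n");
        return 0;
    }
    return 1;
}

int main(void) {
    struct conn anon = { 0 };
    /* A "*100\r\n" header from an unauthenticated client is rejected outright
     * instead of reserving room for 100 arguments. */
    printf("multibulk 100 accepted: %d\n", multibulk_len_ok(&anon, 100));
    printf("bulk 100000000 accepted: %d\n", bulk_len_ok(&anon, 100000000LL));
    return 0;
}
```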
diff --git a/meta-oe/recipes-extended/redis/redis/CVE-2021-32687.patch b/meta-oe/recipes-extended/redis/redis/CVE-2021-32687.patch
new file mode 100644
index 0000000000..fe04e67f30
--- /dev/null
+++ b/meta-oe/recipes-extended/redis/redis/CVE-2021-32687.patch
@@ -0,0 +1,67 @@
1 | From a40ee258accdaf56c23950a6371307ca1aa69f06 Mon Sep 17 00:00:00 2001 | ||
2 | From: Oran Agra <oran@redislabs.com> | ||
3 | Date: Sun, 26 Sep 2021 15:42:17 +0300 | ||
4 | Subject: [PATCH] Fix Integer overflow issue with intsets (CVE-2021-32687) | ||
5 | |||
6 | The vulnerability involves changing the default set-max-intset-entries | ||
7 | configuration parameter to a very large value and constructing specially | ||
8 | crafted commands to manipulate sets | ||
9 | |||
10 | CVE: CVE-2021-32687 | ||
11 | Upstream-Status: Backport[https://github.com/redis/redis/commit/a30d367a71b7017581cf1ca104242a3c644dec0f] | ||
12 | |||
13 | Signed-off-by: Changqing Li <changqing.li@windriver.com> | ||
14 | --- | ||
15 | src/intset.c | 3 ++- | ||
16 | src/rdb.c | 4 +++- | ||
17 | src/t_set.c | 5 ++++- | ||
18 | 3 files changed, 9 insertions(+), 3 deletions(-) | ||
19 | |||
20 | diff --git a/src/intset.c b/src/intset.c | ||
21 | index 9ba1389..e366851 100644 | ||
22 | --- a/src/intset.c | ||
23 | +++ b/src/intset.c | ||
24 | @@ -104,7 +104,8 @@ intset *intsetNew(void) { | ||
25 | |||
26 | /* Resize the intset */ | ||
27 | static intset *intsetResize(intset *is, uint32_t len) { | ||
28 | - uint32_t size = len*intrev32ifbe(is->encoding); | ||
29 | + uint64_t size = (uint64_t)len*intrev32ifbe(is->encoding); | ||
30 | + assert(size <= SIZE_MAX - sizeof(intset)); | ||
31 | is = zrealloc(is,sizeof(intset)+size); | ||
32 | return is; | ||
33 | } | ||
34 | diff --git a/src/rdb.c b/src/rdb.c | ||
35 | index 6f2f516..37b1e0b 100644 | ||
36 | --- a/src/rdb.c | ||
37 | +++ b/src/rdb.c | ||
38 | @@ -1562,7 +1562,9 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) { | ||
39 | if ((len = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL; | ||
40 | |||
41 | /* Use a regular set when there are too many entries. */ | ||
42 | - if (len > server.set_max_intset_entries) { | ||
43 | + size_t max_entries = server.set_max_intset_entries; | ||
44 | + if (max_entries >= 1<<30) max_entries = 1<<30; | ||
45 | + if (len > max_entries) { | ||
46 | o = createSetObject(); | ||
47 | /* It's faster to expand the dict to the right size asap in order | ||
48 | * to avoid rehashing */ | ||
49 | diff --git a/src/t_set.c b/src/t_set.c | ||
50 | index b655b71..d50a05a 100644 | ||
51 | --- a/src/t_set.c | ||
52 | +++ b/src/t_set.c | ||
53 | @@ -66,7 +66,10 @@ int setTypeAdd(robj *subject, sds value) { | ||
54 | if (success) { | ||
55 | /* Convert to regular set when the intset contains | ||
56 | * too many entries. */ | ||
57 | - if (intsetLen(subject->ptr) > server.set_max_intset_entries) | ||
58 | + size_t max_entries = server.set_max_intset_entries; | ||
59 | + /* limit to 1G entries due to intset internals. */ | ||
60 | + if (max_entries >= 1<<30) max_entries = 1<<30; | ||
61 | + if (intsetLen(subject->ptr) > max_entries) | ||
62 | setTypeConvert(subject,OBJ_ENCODING_HT); | ||
63 | return 1; | ||
64 | } | ||
65 | -- | ||
66 | 2.17.1 | ||
67 | |||
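The CVE-2021-32687 hunks above widen the size computation in intsetResize() to 64 bits and clamp set-max-intset-entries to 2^30, so a huge entry count can no longer wrap the byte count passed to zrealloc(). The standalone sketch below is illustrative only, not Redis source; it shows the wrap the old 32-bit multiplication allowed and the widened check the patch introduces.

/* Illustrative only: why the patch widens len*encoding to 64 bits. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint32_t len = 1u << 30;       /* attacker-influenced entry count */
    uint32_t encoding = 8;         /* INTSET_ENC_INT64: 8 bytes per entry */

    uint32_t wrapped = len * encoding;           /* old code: wraps to 0 */
    uint64_t widened = (uint64_t)len * encoding; /* patched computation */

    printf("32-bit product: %u\n", wrapped);     /* prints 0 */
    printf("64-bit product: %llu\n", (unsigned long long)widened);

    /* Mirrors the new assert(size <= SIZE_MAX - sizeof(intset));
     * 16 here is just a stand-in for sizeof(intset). */
    assert(widened <= SIZE_MAX - 16);
    return 0;
}

On a 64-bit build the assert holds; on a 32-bit target the widened product exceeds SIZE_MAX, which is exactly the condition the new assert in intsetResize() refuses instead of letting zrealloc() allocate a tiny buffer.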
diff --git a/meta-oe/recipes-extended/redis/redis/CVE-2021-32762.patch b/meta-oe/recipes-extended/redis/redis/CVE-2021-32762.patch new file mode 100644 index 0000000000..ec6e2fbd5b --- /dev/null +++ b/meta-oe/recipes-extended/redis/redis/CVE-2021-32762.patch | |||
@@ -0,0 +1,68 @@ | |||
1 | From 4b1de5438ad9ef2236c379f2f78feb9f1fd9796e Mon Sep 17 00:00:00 2001 | ||
2 | From: Oran Agra <oran@redislabs.com> | ||
3 | Date: Mon, 4 Oct 2021 12:10:17 +0300 | ||
4 | Subject: [PATCH] Fix redis-cli / redis-sentinel overflow on some platforms | ||
5 | (CVE-2021-32762) (#9587) | ||
6 | |||
7 | The redis-cli command line tool and redis-sentinel service may be vulnerable | ||
8 | to integer overflow when parsing specially crafted large multi-bulk network | ||
9 | replies. This is a result of a vulnerability in the underlying hiredis | ||
10 | library which does not perform an overflow check before calling the calloc() | ||
11 | heap allocation function. | ||
12 | |||
13 | This issue only impacts systems with heap allocators that do not perform their | ||
14 | own overflow checks. Most modern systems do and are therefore not likely to | ||
15 | be affected. Furthermore, by default redis-sentinel uses the jemalloc allocator | ||
16 | which is also not vulnerable. | ||
17 | |||
18 | Co-authored-by: Yossi Gottlieb <yossigo@gmail.com> | ||
19 | |||
20 | CVE: CVE-2021-32762 | ||
21 | Upstream-Status: Backport[https://github.com/redis/redis/commit/0215324a66af949be39b34be2d55143232c1cb71] | ||
22 | |||
23 | Signed-off-by: Changqing Li <changqing.li@windriver.com> | ||
24 | --- | ||
25 | deps/hiredis/hiredis.c | 1 + | ||
26 | deps/hiredis/test.c | 14 ++++++++++++++ | ||
27 | 2 files changed, 15 insertions(+) | ||
28 | |||
29 | diff --git a/deps/hiredis/hiredis.c b/deps/hiredis/hiredis.c | ||
30 | index 51f22a6..990f619 100644 | ||
31 | --- a/deps/hiredis/hiredis.c | ||
32 | +++ b/deps/hiredis/hiredis.c | ||
33 | @@ -174,6 +174,7 @@ static void *createArrayObject(const redisReadTask *task, size_t elements) { | ||
34 | return NULL; | ||
35 | |||
36 | if (elements > 0) { | ||
37 | + if (SIZE_MAX / sizeof(redisReply*) < elements) return NULL; /* Don't overflow */ | ||
38 | r->element = hi_calloc(elements,sizeof(redisReply*)); | ||
39 | if (r->element == NULL) { | ||
40 | freeReplyObject(r); | ||
41 | diff --git a/deps/hiredis/test.c b/deps/hiredis/test.c | ||
42 | index 8295367..bdff74e 100644 | ||
43 | --- a/deps/hiredis/test.c | ||
44 | +++ b/deps/hiredis/test.c | ||
45 | @@ -498,6 +498,20 @@ static void test_reply_reader(void) { | ||
46 | freeReplyObject(reply); | ||
47 | redisReaderFree(reader); | ||
48 | |||
49 | + test("Multi-bulk never overflows regardless of maxelements: "); | ||
50 | + size_t bad_mbulk_len = (SIZE_MAX / sizeof(void *)) + 3; | ||
51 | + char bad_mbulk_reply[100]; | ||
52 | + snprintf(bad_mbulk_reply, sizeof(bad_mbulk_reply), "*%llu\r\n+asdf\r\n", | ||
53 | + (unsigned long long) bad_mbulk_len); | ||
54 | + | ||
55 | + reader = redisReaderCreate(); | ||
56 | + reader->maxelements = 0; /* Don't rely on default limit */ | ||
57 | + redisReaderFeed(reader, bad_mbulk_reply, strlen(bad_mbulk_reply)); | ||
58 | + ret = redisReaderGetReply(reader,&reply); | ||
59 | + test_cond(ret == REDIS_ERR && strcasecmp(reader->errstr, "Out of memory") == 0); | ||
60 | + freeReplyObject(reply); | ||
61 | + redisReaderFree(reader); | ||
62 | + | ||
63 | #if LLONG_MAX > SIZE_MAX | ||
64 | test("Set error when array > SIZE_MAX: "); | ||
65 | reader = redisReaderCreate(); | ||
66 | -- | ||
67 | 2.17.1 | ||
68 | |||
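The hiredis change above guards the element-count multiplication before hi_calloc(), and the new test drives it with a crafted "*<huge>\r\n" multi-bulk header while the reader's maxelements limit is disabled. The sketch below shows the same division-based guard in generic form; the function name and use of void* are placeholders, not the hiredis API.

/* Generic sketch of the guard pattern added before hi_calloc(). */
#include <stdint.h>
#include <stdlib.h>

void *alloc_reply_array(size_t elements) {
    /* Dividing SIZE_MAX avoids the overflow that a multiplication-based
     * check would itself be subject to. */
    if (elements > 0 && SIZE_MAX / sizeof(void *) < elements)
        return NULL;                       /* array size would overflow */
    return calloc(elements, sizeof(void *));
}

As the commit message notes, most modern allocators already detect the calloc() overflow themselves, which is why only platforms without that check (and builds not using jemalloc) were exploitable.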
diff --git a/meta-oe/recipes-extended/redis/redis/CVE-2021-41099.patch b/meta-oe/recipes-extended/redis/redis/CVE-2021-41099.patch new file mode 100644 index 0000000000..ce0e112aeb --- /dev/null +++ b/meta-oe/recipes-extended/redis/redis/CVE-2021-41099.patch | |||
@@ -0,0 +1,47 @@ | |||
1 | From fd25ce2108994b7781269143bdfb3403faa2f1d1 Mon Sep 17 00:00:00 2001 | ||
2 | From: YiyuanGUO <yguoaz@gmail.com> | ||
3 | Date: Wed, 29 Sep 2021 10:20:35 +0300 | ||
4 | Subject: [PATCH] Fix integer overflow in _sdsMakeRoomFor (CVE-2021-41099) | ||
5 | |||
6 | CVE: CVE-2021-41099 | ||
7 | Upstream-Status: Backport[https://github.com/redis/redis/commit/c6ad876774f3cc11e32681ea02a2eead00f2c521] | ||
8 | |||
9 | Signed-off-by: Changqing Li <changqing.li@windriver.com> | ||
10 | --- | ||
11 | src/sds.c | 6 +++--- | ||
12 | 1 file changed, 3 insertions(+), 3 deletions(-) | ||
13 | |||
14 | diff --git a/src/sds.c b/src/sds.c | ||
15 | index 2ec3aa7..5eadae5 100644 | ||
16 | --- a/src/sds.c | ||
17 | +++ b/src/sds.c | ||
18 | @@ -233,7 +233,7 @@ void sdsclear(sds s) { | ||
19 | sds sdsMakeRoomFor(sds s, size_t addlen) { | ||
20 | void *sh, *newsh; | ||
21 | size_t avail = sdsavail(s); | ||
22 | - size_t len, newlen; | ||
23 | + size_t len, newlen, reqlen; | ||
24 | char type, oldtype = s[-1] & SDS_TYPE_MASK; | ||
25 | int hdrlen; | ||
26 | size_t usable; | ||
27 | @@ -243,7 +243,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen) { | ||
28 | |||
29 | len = sdslen(s); | ||
30 | sh = (char*)s-sdsHdrSize(oldtype); | ||
31 | - newlen = (len+addlen); | ||
32 | + reqlen = newlen = (len+addlen); | ||
33 | assert(newlen > len); /* Catch size_t overflow */ | ||
34 | if (newlen < SDS_MAX_PREALLOC) | ||
35 | newlen *= 2; | ||
36 | @@ -258,7 +258,7 @@ sds sdsMakeRoomFor(sds s, size_t addlen) { | ||
37 | if (type == SDS_TYPE_5) type = SDS_TYPE_8; | ||
38 | |||
39 | hdrlen = sdsHdrSize(type); | ||
40 | - assert(hdrlen + newlen + 1 > len); /* Catch size_t overflow */ | ||
41 | + assert(hdrlen + newlen + 1 > reqlen); /* Catch size_t overflow */ | ||
42 | if (oldtype==type) { | ||
43 | newsh = s_realloc_usable(sh, hdrlen+newlen+1, &usable); | ||
44 | if (newsh == NULL) return NULL; | ||
45 | -- | ||
46 | 2.17.1 | ||
47 | |||
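The sdsMakeRoomFor() hunks above record the caller-requested length in reqlen before the preallocation policy doubles newlen, so the final overflow assert compares the allocation size against what was actually requested rather than against the old string length. Below is a simplified sketch of the corrected arithmetic; header-size selection and type switching are elided, and the hdrlen parameter is a simplification, so this is not the full upstream function.

/* Simplified sketch of the patched size arithmetic. */
#include <assert.h>
#include <stddef.h>

#define SDS_MAX_PREALLOC (1024*1024)

size_t grown_alloc_size(size_t len, size_t addlen, size_t hdrlen) {
    size_t newlen, reqlen;
    reqlen = newlen = len + addlen;
    assert(newlen > len);                 /* catch size_t wrap on the add */
    if (newlen < SDS_MAX_PREALLOC)
        newlen *= 2;                      /* preallocation policy */
    else
        newlen += SDS_MAX_PREALLOC;
    assert(hdrlen + newlen + 1 > reqlen); /* patched check uses reqlen */
    return hdrlen + newlen + 1;
}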
diff --git a/meta-oe/recipes-extended/redis/redis_6.2.2.bb b/meta-oe/recipes-extended/redis/redis_6.2.2.bb index ad675e9e04..4317c10605 100644 --- a/meta-oe/recipes-extended/redis/redis_6.2.2.bb +++ b/meta-oe/recipes-extended/redis/redis_6.2.2.bb | |||
@@ -20,6 +20,12 @@ SRC_URI = "http://download.redis.io/releases/${BP}.tar.gz \ | |||
20 | file://fix-CVE-2021-29478.patch \ | 20 | file://fix-CVE-2021-29478.patch \ |
21 | file://fix-CVE-2021-32625.patch \ | 21 | file://fix-CVE-2021-32625.patch \ |
22 | file://CVE-2021-32761.patch \ | 22 | file://CVE-2021-32761.patch \ |
23 | file://CVE-2021-41099.patch \ | ||
24 | file://CVE-2021-32762.patch \ | ||
25 | file://CVE-2021-32687.patch \ | ||
26 | file://CVE-2021-32675.patch \ | ||
27 | file://CVE-2021-32627-CVE-2021-32628.patch \ | ||
28 | file://CVE-2021-32626.patch \ | ||
23 | " | 29 | " |
24 | SRC_URI[sha256sum] = "7a260bb74860f1b88c3d5942bf8ba60ca59f121c6dce42d3017bed6add0b9535" | 30 | SRC_URI[sha256sum] = "7a260bb74860f1b88c3d5942bf8ba60ca59f121c6dce42d3017bed6add0b9535" |
25 | 31 | ||