author     Jo-Philipp Wich <jo@mein.io>    2024-10-16 11:43:31 +0200
committer  Jo-Philipp Wich <jo@mein.io>    2024-10-17 09:15:21 +0200
commit     736d4508420222321468feedd60e7cb5063b574d (patch)
tree       8d4680166f8bfc5202165ac9bc08677590e2a2b1
parent     9cf53dda36bc25b513ec1b1cdfc851a10b37473f (diff)
types: fix potential use after free on adding keys during iteration
When keys are added to the object currently being iterated by a for loop, the insert operation might cause a hash table resize with a subsequent memory reallocation and a different table base pointer, clobbering the entry pointers held by iterators pointing into the resized table.

To address this issue while keeping the iteration overhead low, extend the object key insert logic to check whether the insertion will trigger a reallocation and, if so, back up and restore the affected iterator pointers.

This slightly increases the size of the iterator state, but the overhead should be negligible as only a small number of iterations are concurrently active at any time.

Fixes: #230
Signed-off-by: Jo-Philipp Wich <jo@mein.io>
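To illustrate the idea behind the fix, the following standalone C sketch shows the same backup/restore pattern on a deliberately simplified append-only table. It is not ucode's actual code: the table, table_insert() and table_find() helpers are made up for illustration and merely stand in for json-c's lh_table API. When an insert is about to reallocate the entry array, an iterator into that table drops its raw entry pointer and remembers the key instead, then re-resolves the pointer by key once the insert has completed.

/*
 * Standalone sketch of the backup/restore-around-reallocation pattern
 * described in the commit message. All names here are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry { char key[16]; int val; };

struct table { struct entry *slots; size_t size, count; };

struct iterator { struct table *table; struct entry *pos; char saved_key[16]; };

static struct entry *table_find(struct table *t, const char *key)
{
	for (size_t i = 0; i < t->count; i++)
		if (strcmp(t->slots[i].key, key) == 0)
			return &t->slots[i];

	return NULL;
}

static void table_insert(struct table *t, const char *key, int val, struct iterator *it)
{
	int grow = (t->count == t->size);

	/* insert will reallocate the slot array, back up the iterator position as a key */
	if (grow && it->table == t && it->pos)
		snprintf(it->saved_key, sizeof(it->saved_key), "%s", it->pos->key);

	if (grow) {
		t->size *= 2;
		t->slots = realloc(t->slots, t->size * sizeof(*t->slots));
	}

	struct entry *e = &t->slots[t->count++];

	snprintf(e->key, sizeof(e->key), "%s", key);
	e->val = val;

	/* restore the iterator by looking the saved key up in the relocated array */
	if (grow && it->table == t && it->saved_key[0] != '\0') {
		it->pos = table_find(t, it->saved_key);
		it->saved_key[0] = '\0';
	}
}

int main(void)
{
	struct table t = { malloc(2 * sizeof(struct entry)), 2, 0 };
	struct iterator it = { &t, NULL, "" };

	table_insert(&t, "a", 0, &it);
	table_insert(&t, "b", 1, &it);

	it.pos = &t.slots[0];            /* iteration currently positioned on "a" */
	table_insert(&t, "c", 2, &it);   /* triggers the grow + backup/restore path */

	printf("iterator still points at key \"%s\"\n", it.pos->key);

	return 0;
}

The actual patch below refines this by caching the key's hash alongside the key pointer, so the post-insert lookup via lh_table_lookup_entry_w_hash() does not need to rehash the key string.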
-rw-r--r--  include/ucode/types.h                                        9
-rw-r--r--  tests/custom/99_bugs/48_use_after_free_on_iteration_insert  40
-rw-r--r--  types.c                                                      39
-rw-r--r--  vm.c                                                         11
4 files changed, 91 insertions, 8 deletions
diff --git a/include/ucode/types.h b/include/ucode/types.h
index c0ccd38..2334029 100644
--- a/include/ucode/types.h
+++ b/include/ucode/types.h
@@ -213,7 +213,14 @@ extern uc_list_t uc_object_iterators;
typedef struct {
uc_list_t list;
- struct lh_entry *pos;
+ struct lh_table *table;
+ union {
+ struct lh_entry *pos;
+ struct {
+ const void *k;
+ unsigned long hash;
+ } kh;
+ } u;
} uc_object_iterator_t;
diff --git a/tests/custom/99_bugs/48_use_after_free_on_iteration_insert b/tests/custom/99_bugs/48_use_after_free_on_iteration_insert
new file mode 100644
index 0000000..558f83a
--- /dev/null
+++ b/tests/custom/99_bugs/48_use_after_free_on_iteration_insert
@@ -0,0 +1,40 @@
+Ensure that adding keys to an object currently being iterated will not
+clobber active iterators pointing into that object due to a reallocation
+of the underlying hash table array.
+
+-- Testcase --
+{%
+ let obj = { '0': 0, '1': 1 };
+ let i = 2;
+
+ for (let k, v in obj) {
+ while (i < 16) {
+ obj[i] = i;
+ i++;
+ }
+ }
+
+ printf("%.J\n", obj);
+%}
+-- End --
+
+-- Expect stdout --
+{
+ "0": 0,
+ "1": 1,
+ "2": 2,
+ "3": 3,
+ "4": 4,
+ "5": 5,
+ "6": 6,
+ "7": 7,
+ "8": 8,
+ "9": 9,
+ "10": 10,
+ "11": 11,
+ "12": 12,
+ "13": 13,
+ "14": 14,
+ "15": 15
+}
+-- End --
diff --git a/types.c b/types.c
index 84a7b17..c4bc456 100644
--- a/types.c
+++ b/types.c
@@ -896,8 +896,8 @@ ucv_free_object_entry(struct lh_entry *entry)
uc_list_foreach(item, &uc_object_iterators) {
uc_object_iterator_t *iter = (uc_object_iterator_t *)item;
- if (iter->pos == entry)
- iter->pos = entry->next;
+ if (iter->u.pos == entry)
+ iter->u.pos = entry->next;
}
free(lh_entry_k(entry));
@@ -946,6 +946,24 @@ ucv_object_add(uc_value_t *uv, const char *key, uc_value_t *val)
existing_entry = lh_table_lookup_entry_w_hash(object->table, (const void *)key, hash);
if (existing_entry == NULL) {
+ bool rehash = (object->table->count >= object->table->size * LH_LOAD_FACTOR);
+
+ /* insert will rehash table, backup affected iterator states */
+ if (rehash) {
+ uc_list_foreach(item, &uc_object_iterators) {
+ uc_object_iterator_t *iter = (uc_object_iterator_t *)item;
+
+ if (iter->table != object->table)
+ continue;
+
+ if (iter->u.pos == NULL)
+ continue;
+
+ iter->u.kh.k = iter->u.pos->k;
+ iter->u.kh.hash = lh_get_hash(iter->table, iter->u.kh.k);
+ }
+ }
+
k = xstrdup(key);
if (lh_table_insert_w_hash(object->table, k, val, hash, 0) != 0) {
@@ -954,6 +972,23 @@ ucv_object_add(uc_value_t *uv, const char *key, uc_value_t *val)
return false;
}
+ /* restore affected iterator state pointer after rehash */
+ if (rehash) {
+ uc_list_foreach(item, &uc_object_iterators) {
+ uc_object_iterator_t *iter = (uc_object_iterator_t *)item;
+
+ if (iter->table != object->table)
+ continue;
+
+ if (iter->u.kh.k == NULL)
+ continue;
+
+ iter->u.pos = lh_table_lookup_entry_w_hash(iter->table,
+ iter->u.kh.k,
+ iter->u.kh.hash);
+ }
+ }
+
return true;
}
diff --git a/vm.c b/vm.c
index bb6dc2f..b492236 100644
--- a/vm.c
+++ b/vm.c
@@ -2225,7 +2225,8 @@ uc_vm_object_iterator_next(uc_vm_t *vm, uc_vm_insn_t insn,
res->type = &uc_vm_object_iterator_type;
iter = res->data = (char *)res + sizeof(*res);
- iter->pos = obj->table->head;
+ iter->table = obj->table;
+ iter->u.pos = obj->table->head;
uc_list_insert(&uc_object_iterators, &iter->list);
}
@@ -2241,21 +2242,21 @@ uc_vm_object_iterator_next(uc_vm_t *vm, uc_vm_insn_t insn,
}
/* no next key */
- if (!iter->pos) {
+ if (!iter->u.pos) {
uc_list_remove(&iter->list);
return false;
}
- uc_vm_stack_push(vm, ucv_string_new(iter->pos->k));
+ uc_vm_stack_push(vm, ucv_string_new(iter->u.pos->k));
if (insn == I_NEXTKV)
- uc_vm_stack_push(vm, ucv_get((uc_value_t *)iter->pos->v));
+ uc_vm_stack_push(vm, ucv_get((uc_value_t *)iter->u.pos->v));
uc_vm_stack_push(vm, &res->header);
ucv_put(v);
- iter->pos = iter->pos->next;
+ iter->u.pos = iter->u.pos->next;
return true;
}