Skip to content

Commit

Permalink
Fix IntegrityError when running zenossdbpack (#4584)
Browse files Browse the repository at this point in the history
* Fix IntegrityError when running zenossdbpack

Fixes ZEN-35111.

* Resolved an IntegrityError that occurred
during the pre_pack procedure.
  • Loading branch information
vsaliieva authored Oct 23, 2024
1 parent 19a0090 commit c8badd8
Showing 1 changed file with 95 additions and 1 deletion.
96 changes: 95 additions & 1 deletion Products/ZenUtils/patches/zodbpackmonkey.py
Original file line number Diff line number Diff line change
Expand Up @@ -621,7 +621,9 @@ def pack(self, pack_tid, packed_func=None):

prevent_pke_oids = self.remove_connected_oids(conn, cursor, grouped_oids, packed_func, total, oids_processed)

self._pack_cleanup(conn, cursor)
store_connection = PrePackConnection(self.connmanager)

self._pack_cleanup(store_connection=store_connection)

try:
if skipped_oids:
Expand All @@ -640,6 +642,98 @@ def pack(self, pack_tid, packed_func=None):
finally:
self.connmanager.close(conn, cursor)

@monkeypatch('relstorage.adapters.packundo.HistoryFreePackUndo')
def _add_refs_for_oids(self, load_batcher, store_batcher, oids, get_references):

    """
    Fill object_ref with the states for some objects, recording each
    processed oid in object_refs_added.

    Returns the number of references added.

    This is a monkey-patched copy of
    relstorage.adapters.packundo.HistoryFreePackUndo._add_refs_for_oids.
    The only change from upstream is the ``existing_row`` guard below,
    which avoids an IntegrityError on duplicate inserts into
    object_refs_added (ZEN-35111).
    """
    # oids should be a slice of an ``OidList``, which may be an
    # ``array.array``; those are relatively slow to iterate.

    # The batcher always does deletes before inserts, which is
    # exactly what we want.
    # In the past, we performed all deletes and then all inserts;
    # now, thanks to batching, they could be interleaved, but
    # because we process OID-by-OID, that should be fine.
    # In the past, we also DELETED from object_refs_added and object_ref
    # everything found in the ``oids`` parameter; now we only do a delete if
    # we get back a row from object_state; again, that shouldn't matter, rows
    # should be found in object_state.
    object_ref_schema = store_batcher.row_schema_of_length(3)
    object_refs_added_schema = store_batcher.row_schema_of_length(2)

    # Use the batcher to get efficient ``= ANY()``
    # queries, but go ahead and collect into a list at once
    rows = list(load_batcher.select_from(
        ('zoid', 'tid', 'state'),
        'object_state',
        suffix=' ORDER BY zoid ',
        zoid=oids
    ))

    num_refs_found = 0

    for from_oid, tid, state in rows:
        # Convert the raw DB column into the driver's state type before
        # handing it to the pickle-walking ``get_references`` callable.
        state = self.driver.binary_column_as_state_type(state)
        row = (from_oid, tid)
        # Check if the row already exists
        existing_row = store_batcher.select_from(
            'object_refs_added',
            ('zoid',),
            zoid=from_oid
        )

        # We monkey-patched this method to add the check below (everything else is the same as the original
        # relstorage.adapters.packundo.HistoryFreePackUndo._add_refs_for_oids).
        # This check helps avoid an IntegrityError,
        # which started occurring after upgrading RelStorage to version 3.5.0.
        # Previously, before inserting the zoid, it was first deleted from the object_refs_added table.
        if not existing_row:
            store_batcher.insert_into(
                'object_refs_added (zoid, tid)',
                object_refs_added_schema,
                row,
                row,
                size=2
            )

        # NOTE: although these deletes are queued after the insert above,
        # the batcher executes queued deletes before queued inserts (see
        # comment at top), so they clear stale rows rather than undoing
        # the insert.
        store_batcher.delete_from(
            'object_refs_added',
            zoid=from_oid
        )
        store_batcher.delete_from(
            'object_ref',
            zoid=from_oid
        )

        if state:
            try:
                to_oids = get_references(state)
            except:
                # Log with full context before propagating: an unpicklable
                # state aborts the whole pre_pack run.
                log.exception(
                    "pre_pack: can't unpickle "
                    "object %d in transaction %d; state length = %d",
                    from_oid, tid, len(state)
                )
                raise

            # Queue one object_ref row per outgoing reference found in
            # this object's state.
            for to_oid in to_oids:
                row = (from_oid, tid, to_oid)
                num_refs_found += 1
                store_batcher.insert_into(
                    'object_ref (zoid, tid, to_zoid)',
                    object_ref_schema,
                    row,
                    row,
                    size=3
                )

    return num_refs_found


'''
Methods added to support packing systems that have not been packed for a long time
and that cause zenossdbpack to crash with an OOM error
Expand Down

0 comments on commit c8badd8

Please sign in to comment.