
Commit

PBM-929: added test
olexandr-havryliak committed Apr 17, 2024
1 parent e9f372f commit c603245
Showing 1 changed file with 29 additions and 96 deletions.
125 changes: 29 additions & 96 deletions pbm-functional/pytest/test_replicaset.py
@@ -11,6 +11,7 @@
 
 from datetime import datetime
 from cluster import Cluster
+from packaging import version
 
 documents=[{"a": 1}, {"b": 2}, {"c": 3}, {"d": 4}]
 
@@ -80,100 +81,32 @@ def test_incremental(start_cluster,cluster):
     assert pymongo.MongoClient(cluster.connection)["test"]["test"].count_documents({}) == len(documents)
     Cluster.log("Finished successfully")
 
-@pytest.mark.timeout(3600,func_only=True)
-@pytest.mark.parametrize('backup_t',['logic','phys'])
-def test_load(start_cluster,cluster,backup_t):
-    backup_type = 'logical'
-    if backup_t == "phys":
-        backup_type = 'physical'
-
-    # run transactions, returns array of tuples each includes oplog timestamp, oplog increment, resulted documents count
-    def background_transaction(db,collection):
-        Cluster.log("Starting background insert to " + collection)
-        j = 0
-        result = []
-        while upsert:
-            data = random.randbytes(1024 * 1024)
-            client = pymongo.MongoClient(cluster.connection)
-            with client.start_session() as session:
-                try:
-                    with session.start_transaction():
-                        for i in range(20):
-                            client[db][collection].insert_one({str(i): data }, session=session)
-                            timeout = random.uniform(0.4,0.6)
-                            time.sleep(timeout)
-                        session.commit_transaction()
-                    j = j + 20
-                    Cluster.log(collection + ": (" + str(session.cluster_time['clusterTime'].time) + " " + str(session.cluster_time['clusterTime'].inc) + ") " + str(j))
-                    result.append((session.cluster_time['clusterTime'].time,session.cluster_time['clusterTime'].inc,j))
-                except pymongo.errors.PyMongoError as e:
-                    Cluster.log(e)
-                    continue
-                finally:
-                    client.close()
-        Cluster.log("Stopping background insert to " + collection)
-        return result
-
-    cluster.exec_pbm_cli("config --set storage.type=filesystem --set storage.filesystem.path=/backups --set backup.compression=none --out json")
-    pymongo.MongoClient(cluster.connection).admin.command( { "setParameter": 1, "wiredTigerEngineRuntimeConfig": "cache_size=1G"} )
+@pytest.mark.timeout(300,func_only=True)
+def test_logical_timeseries_PBM_T224(start_cluster,cluster):
     cluster.check_pbm_status()
-    upsert=True
-    background_transaction1 = concurrent.futures.ThreadPoolExecutor().submit(background_transaction, 'test', 'test1')
-    background_transaction2 = concurrent.futures.ThreadPoolExecutor().submit(background_transaction, 'test', 'test2')
-
-    time.sleep(60)
-    try:
-        backup=cluster.make_backup(backup_type)
-    except AssertionError as e:
-        upsert=False
-        background_transaction1.result()
-        background_transaction2.result()
-        assert False, e
-
-    upsert=False
-    upsert1_result = background_transaction1.result()
-    upsert2_result = background_transaction2.result()
-    Cluster.log("test1 documents count: " + str(pymongo.MongoClient(cluster.connection)["test"]["test1"].count_documents({})))
-    Cluster.log("test2 documents count: " + str(pymongo.MongoClient(cluster.connection)["test"]["test2"].count_documents({})))
-
-    # backup_meta=json.loads(cluster.exec_pbm_cli("describe-backup " + backup + " --out=json").stdout)
-    # since pbm describe-backup doesn't return exact oplog timestamp let's check metadata on the storage
-    backup_meta=json.loads(testinfra.get_host("docker://rs101").check_output('cat /backups/' + backup + '.pbm.json'))
-    Cluster.log(json.dumps(backup_meta, indent=4))
-    last_write_ts = (backup_meta["last_write_ts"]["T"],backup_meta["last_write_ts"]["I"])
-
-    # let's find the real count of documents inserted till last_write_ts
-    # we have the array of tuples containing oplog timestamp and resulted documents count like
-    # [(1709134884, 23, 20), (1709134886, 5, 40), (1709134887, 39, 60), (1709134889, 6, 80), (1709134891, 5, 100), (1709134955, 4, 120)]
-    # the second argument is last_write_ts like 1709134954.11
-    # the result should be (1709134891, 5, 100)
-    # result should be t[1] from the resulted tuple
-    def find_inserted(array_tuples,timestamp):
-        print(array_tuples)
-        print(timestamp)
-        filtered_first = [t for t in array_tuples if ( t[0] < timestamp[0] ) or ( t[0] == timestamp[0] and t[1] <= timestamp[1] ) ]
-        max_first = max(t[0] for t in filtered_first)
-        filtered_second = [t for t in filtered_first if t[0] == max_first]
-        result = max(filtered_second, key=lambda t: t[1])
-        print(result)
-        return result[2]
-
-    inserted_test1 = find_inserted(upsert1_result,last_write_ts)
-    inserted_test2 = find_inserted(upsert2_result,last_write_ts)
-    Cluster.log("test1 inserted count: " + str(inserted_test1))
-    Cluster.log("test2 inserted count: " + str(inserted_test2))
-
-    if backup_type == "logical":
-        restart = False
-    else:
-        restart = True
-    cluster.make_restore(backup,restart_cluster=restart,check_pbm_status=True,timeout=600)
-
-    count_test1 = pymongo.MongoClient(cluster.connection)["test"]["test1"].count_documents({})
-    count_test2 = pymongo.MongoClient(cluster.connection)["test"]["test2"].count_documents({})
-    Cluster.log("test1 documents count: " + str(count_test1))
-    Cluster.log("test2 documents count: " + str(count_test2))
-
-    assert inserted_test1 == count_test1 or inserted_test1 + 20 == count_test1
-    assert inserted_test2 == count_test2 or inserted_test2 + 20 == count_test2
-    Cluster.log("Finished successfully\n")
+    client=pymongo.MongoClient(cluster.connection)
+    mongod_version=client.server_info()["version"]
+    if version.parse(mongod_version) < version.parse("5.0.0"):
+        pytest.skip("Unsupported version for timeseries")
+    pymongo.MongoClient(cluster.connection)["test"].create_collection('test1',timeseries={'timeField':'timestamp','metaField': 'data'})
+    for i in range(10):
+        pymongo.MongoClient(cluster.connection)["test"]["test1"].insert_one({"timestamp": datetime.now(), "data": i})
+        time.sleep(0.1)
+    cluster.make_backup("logical")
+    cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.5")
+    time.sleep(30)
+    #create new timeseries collection
+    pymongo.MongoClient(cluster.connection)["test"].create_collection('test2',timeseries={'timeField':'timestamp','metaField': 'data'})
+    for i in range(10):
+        pymongo.MongoClient(cluster.connection)["test"]["test1"].insert_one({"timestamp": datetime.now(), "data": i})
+        pymongo.MongoClient(cluster.connection)["test"]["test2"].insert_one({"timestamp": datetime.now(), "data": i})
+        time.sleep(0.1)
+    pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
+    backup="--time=" + pitr
+    Cluster.log("Time for PITR is: " + pitr)
+    time.sleep(30)
+    pymongo.MongoClient(cluster.connection).drop_database('test')
+    cluster.make_restore(backup,check_pbm_status=True)

Check failure on line 109 in pbm-functional/pytest/test_replicaset.py

GitHub Actions / JUnit Test Report

test_replicaset.test_logical_timeseries_PBM_T224

AssertionError: Starting restore 2024-04-17T06:25:56.449523336Z to point-in-time 2024-04-17T06:25:26 from '2024-04-17T06:24:36Z'...Started logical restore. Waiting to finish...Error: operation failed with: reply oplog: replay chunk 1713335123.1713335153: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1713335124,"I":2},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"test.system.views","Object":[{"Key":"_id","Value":"test.test2"},{"Key":"viewOn","Value":"system.buckets.test2"},{"Key":"pipeline","Value":[[{"Key":"$_internalUnpackBucket","Value":[{"Key":"timeField","Value":"timestamp"},{"Key":"metaField","Value":"data"},{"Key":"bucketMaxSpanSeconds","Value":3600}]}]]}],"Query":[{"Key":"_id","Value":"test.test2"}],"UI":{"Subtype":4,"Data":"ZoBbprWeR/2FLoWfPqd/hQ=="},"LSID":null,"TxnNumber":null,"PrevOpTime":null} | merr <nil>: applyOps: (NamespaceNotFound) Failed to apply operation due to missing collection (66805ba6-b59e-47fd-852e-859f3ea77f85): { ts: Timestamp(1713335124, 2), t: 1, v: 2, op: "i", ns: "test.system.views", o: { _id: "test.test2", viewOn: "system.buckets.test2", pipeline: [ { $_internalUnpackBucket: { timeField: "timestamp", metaField: "data", bucketMaxSpanSeconds: 3600 } } ] }, o2: { _id: "test.test2" }, ui: UUID("66805ba6-b59e-47fd-852e-859f3ea77f85"), h: 0, wall: new Date(0) }
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7f21c98e7a50>

    @pytest.mark.timeout(300,func_only=True)
    def test_logical_timeseries_PBM_T224(start_cluster,cluster):
        cluster.check_pbm_status()
        client=pymongo.MongoClient(cluster.connection)
        mongod_version=client.server_info()["version"]
        if version.parse(mongod_version) < version.parse("5.0.0"):
            pytest.skip("Unsupported version for timeseries")
        pymongo.MongoClient(cluster.connection)["test"].create_collection('test1',timeseries={'timeField':'timestamp','metaField': 'data'})
        for i in range(10):
            pymongo.MongoClient(cluster.connection)["test"]["test1"].insert_one({"timestamp": datetime.now(), "data": i})
            time.sleep(0.1)
        cluster.make_backup("logical")
        cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.5")
        time.sleep(30)
        #create new timeseries collection
        pymongo.MongoClient(cluster.connection)["test"].create_collection('test2',timeseries={'timeField':'timestamp','metaField': 'data'})
        for i in range(10):
            pymongo.MongoClient(cluster.connection)["test"]["test1"].insert_one({"timestamp": datetime.now(), "data": i})
            pymongo.MongoClient(cluster.connection)["test"]["test2"].insert_one({"timestamp": datetime.now(), "data": i})
            time.sleep(0.1)
        pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
        backup="--time=" + pitr
        Cluster.log("Time for PITR is: " + pitr)
        time.sleep(30)
        pymongo.MongoClient(cluster.connection).drop_database('test')
>       cluster.make_restore(backup,check_pbm_status=True)

test_replicaset.py:109: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <cluster.Cluster object at 0x7f21c98e7a50>
name = '--time=2024-04-17T06:25:26', kwargs = {'check_pbm_status': True}
result = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7f21cd13ad10>, exit_status=1, command=b'time...o2: { _id: "test.test2" }, ui: UUID("66805ba6-b59e-47fd-852e-859f3ea77f85"), h: 0, wall: new Date(0) }\n', _stderr=b'')
n = <testinfra.host.Host docker://rs101>, timeout = 240, error = ''
host = 'rs103', container = <Container: 3d883f269d13>

    def make_restore(self, name, **kwargs):
        if self.layout == "sharded":
            client = pymongo.MongoClient(self.connection)
            result = client.admin.command("balancerStop")
            client.close()
            Cluster.log("Stopping balancer: " + str(result))
            self.stop_mongos()
        self.stop_arbiters()
        n = testinfra.get_host("docker://" + self.pbm_cli)
        timeout = time.time() + 60
    
        while True:
            if not self.get_status()['running']:
                break
            if time.time() > timeout:
                assert False, "Cannot start restore, another operation running"
            time.sleep(1)
        Cluster.log("Restore started")
        timeout=kwargs.get('timeout', 240)
        result = n.run('timeout ' + str(timeout) + ' pbm restore ' + name + ' --wait')
    
        if result.rc == 0:
            Cluster.log(result.stdout)
        else:
            # try to catch possible failures if timeout exceeded
            error=''
            for host in self.mongod_hosts:
                try:
                    container = docker.from_env().containers.get(host)
                    get_logs = container.exec_run(
                        'cat /var/lib/mongo/pbm.restore.log', stderr=False)
                    if get_logs.exit_code == 0:
                        Cluster.log(
                            "!!!!Possible failure on {}, file pbm.restore.log was found:".format(host))
                        logs = get_logs.output.decode('utf-8')
                        Cluster.log(logs)
                        if '"s":"F"' in logs:
                            error = logs
                except docker.errors.APIError:
                    pass
            if error:
                assert False, result.stdout + result.stderr + "\n" + error
            else:
>               assert False, result.stdout + result.stderr
E               AssertionError: Starting restore 2024-04-17T06:25:56.449523336Z to point-in-time 2024-04-17T06:25:26 from '2024-04-17T06:24:36Z'...Started logical restore.
E               Waiting to finish...Error: operation failed with: reply oplog: replay chunk 1713335123.1713335153: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1713335124,"I":2},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"test.system.views","Object":[{"Key":"_id","Value":"test.test2"},{"Key":"viewOn","Value":"system.buckets.test2"},{"Key":"pipeline","Value":[[{"Key":"$_internalUnpackBucket","Value":[{"Key":"timeField","Value":"timestamp"},{"Key":"metaField","Value":"data"},{"Key":"bucketMaxSpanSeconds","Value":3600}]}]]}],"Query":[{"Key":"_id","Value":"test.test2"}],"UI":{"Subtype":4,"Data":"ZoBbprWeR/2FLoWfPqd/hQ=="},"LSID":null,"TxnNumber":null,"PrevOpTime":null} | merr <nil>: applyOps: (NamespaceNotFound) Failed to apply operation due to missing collection (66805ba6-b59e-47fd-852e-859f3ea77f85): { ts: Timestamp(1713335124, 2), t: 1, v: 2, op: "i", ns: "test.system.views", o: { _id: "test.test2", viewOn: "system.buckets.test2", pipeline: [ { $_internalUnpackBucket: { timeField: "timestamp", metaField: "data", bucketMaxSpanSeconds: 3600 } } ] }, o2: { _id: "test.test2" }, ui: UUID("66805ba6-b59e-47fd-852e-859f3ea77f85"), h: 0, wall: new Date(0) }

cluster.py:462: AssertionError
+    assert pymongo.MongoClient(cluster.connection)["test"]["test1"].count_documents({}) == 20
+    assert pymongo.MongoClient(cluster.connection)["test"]["test2"].count_documents({}) == 10
+    Cluster.log("Finished successfully")
