
PBM-1223 logical backup when distributed txn was started before backup #79

GitHub Actions / JUnit Test Report failed Dec 15, 2023 in 0s

15 tests run, 13 passed, 0 skipped, 2 failed.

Annotations

Check failure on line 89 in pbm-functional/pytest/test_PBM-1090.py


github-actions / JUnit Test Report

test_PBM-1090.test_logical

Failed: Timeout >300.0s
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7fbaf065a590>

    @pytest.mark.testcase(test_case_key="T204", test_step_key=1)
    @pytest.mark.timeout(300,func_only=True)
    def test_logical(start_cluster,cluster):
        cluster.check_pbm_status()
    
        client = pymongo.MongoClient(cluster.connection)
        db = client.test
        collection = db.test
        Cluster.log("Create collection, unique index and insert data")
        collection.insert_one({"a": 1, "b": 1, "c": 1})
        collection.create_index([("a",1),("b",1),("c",1)], name='test_index', unique = True)
        res = pymongo.MongoClient(cluster.connection)["test"]["test"].find({})
        Cluster.log('Collection:')
        for r in res:
            Cluster.log(r)
    
        def upsert_1():
            Cluster.log("Starting background upsert 1")
            while upsert:
                query = {"a": 1}
                update = {"$set": {"a": 1, "b": 1, "c": 1}}
                pymongo.MongoClient(cluster.connection)['test']['test'].delete_one(query)
                try:
                    doc = pymongo.MongoClient(cluster.connection)['test']['test'].find_one_and_update(query,update,upsert=True,return_document=pymongo.collection.ReturnDocument.AFTER)
                    #Cluster.log(doc)
                except pymongo.errors.DuplicateKeyError:
                    pass
            Cluster.log("Stopping background upsert 1")
    
        def upsert_2():
            Cluster.log("Starting background upsert 2")
            while upsert:
                query = {"b": 1}
                update = {"$set": {"a": 2, "b": 1, "c": 1}}
                pymongo.MongoClient(cluster.connection)['test']['test'].delete_one(query)
                try:
                    doc = pymongo.MongoClient(cluster.connection)['test']['test'].find_one_and_update(query,update,upsert=True,return_document=pymongo.collection.ReturnDocument.AFTER)
                    #Cluster.log(doc)
                except pymongo.errors.DuplicateKeyError:
                    pass
            Cluster.log("Stopping background upsert 2")
    
        upsert=True
        t1 = threading.Thread(target=upsert_1)
        t2 = threading.Thread(target=upsert_2)
        t1.start()
        t2.start()
    
>       backup = cluster.make_backup("logical")

test_PBM-1090.py:89: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <cluster.Cluster object at 0x7fbaf065a590>, type = 'logical'

    def make_backup(self, type):
        n = testinfra.get_host("docker://" + self.pbm_cli)
        timeout = time.time() + 120
        while True:
            running = self.get_status()['running']
            Cluster.log("Current operation: " + str(running))
            if not running:
                if type:
                    start = n.check_output(
                        'pbm backup --out=json --type=' + type)
                else:
                    start = n.check_output('pbm backup --out=json')
                name = json.loads(start)['name']
                Cluster.log("Backup started")
                break
            if time.time() > timeout:
                assert False
            time.sleep(1)
        timeout = time.time() + 600
        while True:
            status = self.get_status()
            Cluster.log("Current operation: " + str(status['running']))
            if status['backups']['snapshot']:
                for snapshot in status['backups']['snapshot']:
                    if snapshot['name'] == name:
                        if snapshot['status'] == 'done':
                            Cluster.log("Backup found: " + str(snapshot))
                            return name
                        elif snapshot['status'] == 'error':
                            self.get_logs()
                            assert False, snapshot['error']
            if time.time() > timeout:
                assert False, "Backup timeout exceeded"
>           time.sleep(1)
E           Failed: Timeout >300.0s

cluster.py:408: Failed
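
Note on this failure: the pytest-timeout limit of 300s fires while make_backup() is still inside its 600-second completion loop (the two background upsert threads keep the backup from reaching 'done'), so the report shows the plugin's generic "Failed: Timeout >300.0s" rather than the last backup status. Below is a minimal sketch of a completion wait with a deadline shorter than the test timeout, reusing get_status() and the snapshot fields from the traceback above; wait_backup_done and its deadline parameter are hypothetical helpers, not part of cluster.py.

    import time

    def wait_backup_done(cluster, name, deadline=240):
        # Poll the cluster status until the named snapshot is done,
        # errored, or the deadline passes, so the test fails with the
        # last observed backup status instead of a bare timeout.
        stop = time.time() + deadline
        last = None
        while time.time() < stop:
            status = cluster.get_status()
            for snapshot in status['backups']['snapshot'] or []:
                if snapshot['name'] == name:
                    last = snapshot
                    if snapshot['status'] == 'done':
                        return snapshot
                    if snapshot['status'] == 'error':
                        raise AssertionError(snapshot['error'])
            time.sleep(1)
        raise AssertionError(
            "backup {} not finished before deadline, last status: {}".format(name, last))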

Check failure on line 79 in pbm-functional/pytest/test_PBM-1223.py


github-actions / JUnit Test Report

test_PBM-1223.test_logical

AssertionError: Starting restore 2023-12-15T09:10:47.697704262Z from '2023-12-15T09:10:26Z'...Started logical restore.
Waiting to finish...Error: operation failed with: reply oplog: replay chunk 1702631427.1702631431: apply oplog for chunk: applying a transaction entry: apply txn: {
 "Timestamp": {
  "T": 1702631427,
  "I": 9
 },
 "Term": 1,
 "Hash": null,
 "Version": 2,
 "Operation": "c",
 "Namespace": "admin.$cmd",
 "Object": [
  {
   "Key": "commitTransaction",
   "Value": 1
  },
  {
   "Key": "commitTimestamp",
   "Value": {
    "T": 1702631427,
    "I": 7
   }
  }
 ],
 "Query": null,
 "UI": null,
 "LSID": "SAAAAAVpZAAQAAAABIAdyWrJw0oegMaGMCt0+t4FdWlkACAAAAAAY5mrDaxi8gv8RmdTsQ+1j7fmkr7JUsabhNmXAheU0fgA",
 "TxnNumber": 1,
 "PrevOpTime": "HAAAABF0cwAHAAAAAxh8ZRJ0AAEAAAAAAAAAAA=="
}: unknown transaction id SAAAAAVpZAAQAAAABIAdyWrJw0oegMaGMCt0+t4FdWlkACAAAAAAY5mrDaxi8gv8RmdTsQ+1j7fmkr7JUsabhNmXAheU0fgA-1
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7fbaefef0d90>

    @pytest.mark.testcase(test_case_key="T249", test_step_key=1)
    @pytest.mark.timeout(300,func_only=True)
    def test_logical(start_cluster,cluster):
        cluster.check_pbm_status()
        client = pymongo.MongoClient(cluster.connection)
        db = client.test
        collection = db.test
        with client.start_session() as session:
            with session.start_transaction():
                Cluster.log("Transaction started\n")
                collection.insert_one({"e": 5}, session=session)
                collection.insert_one({"f": 6}, session=session)
                collection.insert_one({"g": 7}, session=session)
                collection.insert_one({"h": 8}, session=session)
                collection.insert_one({"i": 9}, session=session)
                background_backup=concurrent.futures.ThreadPoolExecutor().submit(cluster.make_backup, 'logical')
                time.sleep(1)
                collection.insert_one({"j": 10}, session=session)
                collection.insert_one({"k": 11}, session=session)
                collection.insert_one({"l": 12}, session=session)
                session.commit_transaction()
                Cluster.log("Transaction commited\n")
        backup=background_backup.result()
        assert pymongo.MongoClient(cluster.connection)["test"]["test"].count_documents({}) == 8
>       cluster.make_restore(backup,check_pbm_status=True)

test_PBM-1223.py:79: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <cluster.Cluster object at 0x7fbaefef0d90>, name = '2023-12-15T09:10:26Z'
kwargs = {'check_pbm_status': True}
client = MongoClient(host=['mongos:27017'], document_class=dict, tz_aware=False, connect=True)
result = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7fbaf1243410>, exit_status=1, command=b'time... id SAAAAAVpZAAQAAAABIAdyWrJw0oegMaGMCt0+t4FdWlkACAAAAAAY5mrDaxi8gv8RmdTsQ+1j7fmkr7JUsabhNmXAheU0fgA-1\n', _stderr=b'')
n = <testinfra.host.Host docker://rscfg01>, timeout = 1702631507.5398917

    def make_restore(self, name, **kwargs):
        if self.layout == "sharded":
            client = pymongo.MongoClient(self.connection)
            result = client.admin.command("balancerStop")
            client.close()
            Cluster.log("Stopping balancer: " + str(result))
            self.stop_mongos()
        self.stop_arbiters()
        n = testinfra.get_host("docker://" + self.pbm_cli)
        timeout = time.time() + 60
    
        while True:
            if not self.get_status()['running']:
                break
            if time.time() > timeout:
                assert False, "Cannot start restore, another operation running"
            time.sleep(1)
        Cluster.log("Restore started")
        result = n.run('timeout 240 pbm restore ' + name + ' --wait')
        if result.rc == 124:
            # try to catch possible failures if timeout exceeded
            for host in self.mongod_hosts:
                try:
                    container = docker.from_env().containers.get(host)
                    get_logs = container.exec_run(
                        'cat /var/lib/mongo/pbm.restore.log', stderr=False)
                    if get_logs.exit_code == 0:
                        Cluster.log(
                            "!!!!Possible failure on {}, file pbm.restore.log was found:".format(host))
                        Cluster.log(get_logs.output.decode('utf-8'))
                except docker.errors.APIError:
                    pass
            assert False, "Timeout for restore exceeded"
        elif result.rc == 0:
            Cluster.log(result.stdout)
        else:
>           assert False, result.stdout + result.stderr
E           AssertionError: Starting restore 2023-12-15T09:10:47.697704262Z from '2023-12-15T09:10:26Z'...Started logical restore.
E           Waiting to finish...Error: operation failed with: reply oplog: replay chunk 1702631427.1702631431: apply oplog for chunk: applying a transaction entry: apply txn: {
E            "Timestamp": {
E             "T": 1702631427,
E             "I": 9
E            },
E            "Term": 1,
E            "Hash": null,
E            "Version": 2,
E            "Operation": "c",
E            "Namespace": "admin.$cmd",
E            "Object": [
E             {
E              "Key": "commitTransaction",
E              "Value": 1
E             },
E             {
E              "Key": "commitTimestamp",
E              "Value": {
E               "T": 1702631427,
E               "I": 7
E              }
E             }
E            ],
E            "Query": null,
E            "UI": null,
E            "LSID": "SAAAAAVpZAAQAAAABIAdyWrJw0oegMaGMCt0+t4FdWlkACAAAAAAY5mrDaxi8gv8RmdTsQ+1j7fmkr7JUsabhNmXAheU0fgA",
E            "TxnNumber": 1,
E            "PrevOpTime": "HAAAABF0cwAHAAAAAxh8ZRJ0AAEAAAAAAAAAAA=="
E           }: unknown transaction id SAAAAAVpZAAQAAAABIAdyWrJw0oegMaGMCt0+t4FdWlkACAAAAAAY5mrDaxi8gv8RmdTsQ+1j7fmkr7JUsabhNmXAheU0fgA-1

cluster.py:450: AssertionError
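
Note on this failure: the restore aborts because the backup's oplog range contains only the tail of the distributed transaction. The transaction was opened and its first inserts were written before the backup's start timestamp, so oplog replay reaches the commitTransaction entry without ever having seen the applyOps entries that introduced that transaction id, hence "unknown transaction id". A condensed sketch of the timing outside the test harness is shown below; the mongos URI and the one-second sleep are placeholders, and the pbm commands are the same ones make_backup()/make_restore() shell out to above.

    import json
    import subprocess
    import time

    import pymongo

    client = pymongo.MongoClient("mongodb://mongos:27017")    # placeholder URI
    coll = client.test.test
    with client.start_session() as session:
        with session.start_transaction():
            # These writes land in the oplog before the backup's start timestamp.
            coll.insert_one({"e": 5}, session=session)
            # Start the logical backup while the transaction is still open.
            start = subprocess.check_output(
                ["pbm", "backup", "--type=logical", "--out=json"])
            name = json.loads(start)["name"]
            time.sleep(1)    # placeholder: let the backup pick its start timestamp
            coll.insert_one({"j": 10}, session=session)
            # The commit falls inside the backup's oplog range.
            session.commit_transaction()
    # Waiting for the snapshot to finish and then running
    #   pbm restore <name> --wait
    # replays the commitTransaction entry without the transaction's earlier
    # oplog entries, which produces the "unknown transaction id" error above.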