PBM-1114 #173
GitHub Actions / JUnit Test Report
failed
Sep 19, 2024 in 0s
32 tests run, 20 passed, 10 skipped, 2 failed.
Annotations
Check failure on line 56 in pbm-functional/pytest/test_PBM-1114.py
github-actions / JUnit Test Report
test_PBM-1114.test_logical_PBM_T266
AssertionError: Starting backup '2024-09-19T06:31:39Z'....
Waiting for '2024-09-19T06:31:39Z' backup................ done
Backup '2024-09-19T06:31:39Z' to remote store 'http://nginx-minio:21114/bcp/pbme2etest'
assert 0 != 0
+ where 0 = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7f9f79618a90>, exit_status=0, command=b'pbm ........... done\nBackup '2024-09-19T06:31:39Z' to remote store 'http://nginx-minio:21114/bcp/pbme2etest'\n", _stderr=b'').rc
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7f9f78e7bf10>
@pytest.mark.timeout(300, func_only=True)
def test_logical_PBM_T266(start_cluster, cluster):
cluster.check_pbm_status()
pymongo.MongoClient(cluster.connection)["test"]["test"].insert_many(documents)
result = cluster.exec_pbm_cli("backup --wait")
> assert result.rc != 0, result.stdout + result.stderr
E AssertionError: Starting backup '2024-09-19T06:31:39Z'....
E Waiting for '2024-09-19T06:31:39Z' backup................ done
E Backup '2024-09-19T06:31:39Z' to remote store 'http://nginx-minio:21114/bcp/pbme2etest'
E
E assert 0 != 0
E + where 0 = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7f9f79618a90>, exit_status=0, command=b'pbm ........... done\nBackup '2024-09-19T06:31:39Z' to remote store 'http://nginx-minio:21114/bcp/pbme2etest'\n", _stderr=b'').rc
test_PBM-1114.py:56: AssertionError
Check failure on line 86 in pbm-functional/pytest/test_PBM-1297.py
github-actions / JUnit Test Report
test_PBM-1297.test_logical_pitr_PBM_T253
AssertionError: Starting restore 2024-09-19T06:51:30.146643805Z to point-in-time 2024-09-19T06:49:59 from '2024-09-19T06:48:36Z'...Started logical restore.
Waiting to finish....Error: operation failed with: reply oplog: replay chunk 1726728519.1726728597: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1726728567,"I":17},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"config.databases","Object":[{"Key":"_id","Value":"test"},{"Key":"primary","Value":"rs2"},{"Key":"partitioned","Value":true},{"Key":"version","Value":[{"Key":"uuid","Value":{"Subtype":4,"Data":"qbIUu0/jQ3CRBdO0q5WLvw=="}},{"Key":"timestamp","Value":{"T":1726728567,"I":15}},{"Key":"lastMod","Value":1}]}],"Query":null,"UI":{"Subtype":4,"Data":"qZgbxuM1R+yoMiO94gzEGg=="},"LSID":null,"TxnNumber":null,"PrevOpTime":null,"MultiOpType":null} | merr <nil>: applyOps: (NamespaceNotFound) cannot apply insert or update operation on a non-existent namespace config.databases: { ts: Timestamp(1726728567, 17), t: 1, v: 2, op: "i", ns: "config.databases", o: { _id: "test", primary: "rs2", partitioned: true, version: { uuid: UUID("a9b214bb-4fe3-4370-9105-d3b4ab958bbf"), timestamp: Timestamp(1726728567, 15), lastMod: 1 } }, ui: UUID("a9981bc6-e335-47ec-a832-23bde20cc41a") }
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7f9f7923d710>
newcluster = <cluster.Cluster object at 0x7f9f7961b350>
@pytest.mark.timeout(600, func_only=True)
def test_logical_pitr_PBM_T253(start_cluster,cluster,newcluster):
cluster.check_pbm_status()
cluster.make_backup("logical")
cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.5")
#Create the first database during oplog slicing
client=pymongo.MongoClient(cluster.connection)
client.admin.command("enableSharding", "test")
client.admin.command("shardCollection", "test.test", key={"_id": "hashed"})
for i in range(100):
pymongo.MongoClient(cluster.connection)["test"]["test"].insert_one({"doc":i})
time.sleep(30)
pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
backup="--time=" + pitr
Cluster.log("Time for PITR is: " + pitr)
time.sleep(30)
cluster.disable_pitr()
time.sleep(10)
cluster.destroy()
newcluster.create()
newcluster.setup_pbm()
time.sleep(10)
newcluster.check_pbm_status()
> newcluster.make_restore(backup,check_pbm_status=True)
test_PBM-1297.py:86:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <cluster.Cluster object at 0x7f9f7961b350>
name = '--time=2024-09-19T06:49:59', kwargs = {'check_pbm_status': True}
client = MongoClient(host=['newmongos:27017'], document_class=dict, tz_aware=False, connect=True)
result = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7f9f78facf90>, exit_status=1, command=b'time...mestamp: Timestamp(1726728567, 15), lastMod: 1 } }, ui: UUID("a9981bc6-e335-47ec-a832-23bde20cc41a") }\n', _stderr=b'')
n = <testinfra.host.Host docker://newrscfg01>, timeout = 240, error = ''
host = 'newrscfg01', container = <Container: 3415c458497e>
def make_restore(self, name, **kwargs):
if self.layout == "sharded":
client = pymongo.MongoClient(self.connection)
result = client.admin.command("balancerStop")
client.close()
Cluster.log("Stopping balancer: " + str(result))
self.stop_mongos()
self.stop_arbiters()
n = testinfra.get_host("docker://" + self.pbm_cli)
timeout = time.time() + 60
while True:
if not self.get_status()['running']:
break
if time.time() > timeout:
assert False, "Cannot start restore, another operation running"
time.sleep(1)
Cluster.log("Restore started")
timeout=kwargs.get('timeout', 240)
result = n.run('timeout ' + str(timeout) + ' pbm restore ' + name + ' --wait')
if result.rc == 0:
Cluster.log(result.stdout)
else:
# try to catch possible failures if timeout exceeded
error=''
for host in self.mongod_hosts:
try:
container = docker.from_env().containers.get(host)
get_logs = container.exec_run(
'cat /var/lib/mongo/pbm.restore.log', stderr=False)
if get_logs.exit_code == 0:
Cluster.log(
"!!!!Possible failure on {}, file pbm.restore.log was found:".format(host))
logs = get_logs.output.decode('utf-8')
Cluster.log(logs)
if '"s":"F"' in logs:
error = logs
except docker.errors.APIError:
pass
if error:
assert False, result.stdout + result.stderr + "\n" + error
else:
> assert False, result.stdout + result.stderr
E AssertionError: Starting restore 2024-09-19T06:51:30.146643805Z to point-in-time 2024-09-19T06:49:59 from '2024-09-19T06:48:36Z'...Started logical restore.
E Waiting to finish....Error: operation failed with: reply oplog: replay chunk 1726728519.1726728597: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1726728567,"I":17},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"config.databases","Object":[{"Key":"_id","Value":"test"},{"Key":"primary","Value":"rs2"},{"Key":"partitioned","Value":true},{"Key":"version","Value":[{"Key":"uuid","Value":{"Subtype":4,"Data":"qbIUu0/jQ3CRBdO0q5WLvw=="}},{"Key":"timestamp","Value":{"T":1726728567,"I":15}},{"Key":"lastMod","Value":1}]}],"Query":null,"UI":{"Subtype":4,"Data":"qZgbxuM1R+yoMiO94gzEGg=="},"LSID":null,"TxnNumber":null,"PrevOpTime":null,"MultiOpType":null} | merr <nil>: applyOps: (NamespaceNotFound) cannot apply insert or update operation on a non-existent namespace config.databases: { ts: Timestamp(1726728567, 17), t: 1, v: 2, op: "i", ns: "config.databases", o: { _id: "test", primary: "rs2", partitioned: true, version: { uuid: UUID("a9b214bb-4fe3-4370-9105-d3b4ab958bbf"), timestamp: Timestamp(1726728567, 15), lastMod: 1 } }, ui: UUID("a9981bc6-e335-47ec-a832-23bde20cc41a") }
cluster.py:464: AssertionError