cleanup(storage): simplify test initialization (#14490)
Another round of cleanups on test initialization.
coryan authored Jul 18, 2024
1 parent b1b2f3e commit eb2cacc
Showing 7 changed files with 120 additions and 143 deletions.
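
The pattern applied throughout is visible in the hunks below: MakeIntegrationTestClient() previously returned a wrapper that each test had to validate before use, while the updated call sites construct a usable client in a single step. A minimal before/after sketch, assuming (from the new call sites) that MakeIntegrationTestClient(Options{}) returns a storage::Client value rather than a StatusOr<Client>:

  // Before this commit: validate the StatusOr<Client> wrapper, then
  // dereference it with operator-> on every call.
  auto client = MakeIntegrationTestClient();
  ASSERT_STATUS_OK(client);
  auto insert = client->CreateBucketForProject(bucket_name, project_name,
                                               BucketMetadata());

  // After this commit: the helper takes an Options argument and yields a
  // usable client directly, so the status check disappears and member
  // calls use '.' instead of '->'.
  auto client = MakeIntegrationTestClient(Options{});
  auto insert = client.CreateBucketForProject(bucket_name, project_name,
                                              BucketMetadata());
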
56 changes: 27 additions & 29 deletions google/cloud/storage/tests/grpc_bucket_metadata_integration_test.cc
@@ -51,17 +51,16 @@ TEST_F(GrpcBucketMetadataIntegrationTest, BucketMetadataCRUD) {
ASSERT_THAT(project_name, Not(IsEmpty()))
<< "GOOGLE_CLOUD_PROJECT is not set";

auto client = MakeIntegrationTestClient();
ASSERT_STATUS_OK(client);
auto client = MakeIntegrationTestClient(Options{});

auto bucket_name = MakeRandomBucketName();
auto insert = client->CreateBucketForProject(bucket_name, project_name,
BucketMetadata());
auto insert = client.CreateBucketForProject(bucket_name, project_name,
BucketMetadata());
ASSERT_STATUS_OK(insert);
ScheduleForDelete(*insert);
EXPECT_EQ(insert->name(), bucket_name);

auto get = client->GetBucketMetadata(bucket_name);
auto get = client.GetBucketMetadata(bucket_name);
ASSERT_STATUS_OK(get);

// There are too many fields with missing values in the testbench, just test
@@ -77,21 +76,21 @@ TEST_F(GrpcBucketMetadataIntegrationTest, BucketMetadataCRUD) {

// We need to set the retention policy or the request to lock the retention
// policy (see below) will fail.
auto patch = client->PatchBucket(
auto patch = client.PatchBucket(
bucket_name, BucketMetadataPatchBuilder{}
.SetLabel("l0", "k0")
.SetRetentionPolicy(std::chrono::seconds(30)));
ASSERT_STATUS_OK(patch);
EXPECT_THAT(patch->labels(), ElementsAre(Pair("l0", "k0")));

auto updated = client->UpdateBucket(
auto updated = client.UpdateBucket(
patch->name(), BucketMetadata(*patch).upsert_label("l1", "test-value"));
ASSERT_STATUS_OK(updated);
EXPECT_THAT(updated->labels(),
UnorderedElementsAre(Pair("l0", "k0"), Pair("l1", "test-value")));

auto locked =
client->LockBucketRetentionPolicy(bucket_name, updated->metageneration());
client.LockBucketRetentionPolicy(bucket_name, updated->metageneration());
ASSERT_STATUS_OK(locked);
ASSERT_TRUE(updated->has_retention_policy());
ASSERT_TRUE(locked->has_retention_policy());
@@ -100,19 +99,19 @@ TEST_F(GrpcBucketMetadataIntegrationTest, BucketMetadataCRUD) {

// Create a second bucket to make the list more interesting.
auto bucket_name_2 = MakeRandomBucketName();
auto insert_2 = client->CreateBucketForProject(bucket_name_2, project_name,
BucketMetadata());
auto insert_2 = client.CreateBucketForProject(bucket_name_2, project_name,
BucketMetadata());
ASSERT_STATUS_OK(insert_2);
ScheduleForDelete(*insert_2);

std::vector<std::string> names;
for (auto const& b : client->ListBucketsForProject(project_name)) {
for (auto const& b : client.ListBucketsForProject(project_name)) {
ASSERT_STATUS_OK(b);
names.push_back(b->name());
}
EXPECT_THAT(names, IsSupersetOf({bucket_name, bucket_name_2}));

auto policy = client->GetNativeBucketIamPolicy(bucket_name);
auto policy = client.GetNativeBucketIamPolicy(bucket_name);
ASSERT_STATUS_OK(policy);

std::vector<std::string> roles;
@@ -123,18 +122,18 @@ TEST_F(GrpcBucketMetadataIntegrationTest, BucketMetadataCRUD) {
"roles/storage.legacyBucketReader"}));

auto new_policy = *policy;
policy = client->SetNativeBucketIamPolicy(bucket_name, new_policy);
policy = client.SetNativeBucketIamPolicy(bucket_name, new_policy);
ASSERT_STATUS_OK(policy);

auto permissions = client->TestBucketIamPermissions(
auto permissions = client.TestBucketIamPermissions(
bucket_name, {"storage.objects.list", "storage.buckets.update"});
ASSERT_STATUS_OK(permissions);
EXPECT_THAT(*permissions, Contains("storage.buckets.update"));

auto delete_status = client->DeleteBucket(bucket_name);
auto delete_status = client.DeleteBucket(bucket_name);
ASSERT_STATUS_OK(delete_status);

auto post_delete = client->GetBucketMetadata(bucket_name);
auto post_delete = client.GetBucketMetadata(bucket_name);
EXPECT_THAT(post_delete, StatusIs(StatusCode::kNotFound));
}

@@ -146,30 +145,29 @@ TEST_F(GrpcBucketMetadataIntegrationTest, PatchLabels) {
ASSERT_THAT(project_name, Not(IsEmpty()))
<< "GOOGLE_CLOUD_PROJECT is not set";

auto client = MakeIntegrationTestClient();
ASSERT_STATUS_OK(client);

auto client = MakeIntegrationTestClient(Options{});
auto bucket_name = MakeRandomBucketName();
auto insert = client->CreateBucketForProject(bucket_name, project_name,
BucketMetadata());

auto insert = client.CreateBucketForProject(bucket_name, project_name,
BucketMetadata());
ASSERT_STATUS_OK(insert);
ScheduleForDelete(*insert);
EXPECT_EQ(insert->name(), bucket_name);

auto patch =
client->PatchBucket(bucket_name, BucketMetadataPatchBuilder{}
.SetLabel("test-key0", "v0")
.SetLabel("test-key1", "v1")
.SetLabel("test-key2", "v2"));
client.PatchBucket(bucket_name, BucketMetadataPatchBuilder{}
.SetLabel("test-key0", "v0")
.SetLabel("test-key1", "v1")
.SetLabel("test-key2", "v2"));
ASSERT_STATUS_OK(patch);
EXPECT_THAT(patch->labels(), AllOf(Contains(Pair("test-key0", "v0")),
Contains(Pair("test-key1", "v1")),
Contains(Pair("test-key2", "v2"))));

patch = client->PatchBucket(bucket_name, BucketMetadataPatchBuilder{}
.SetLabel("test-key0", "new-v0")
.ResetLabel("test-key1")
.SetLabel("test-key3", "v3"));
patch = client.PatchBucket(bucket_name, BucketMetadataPatchBuilder{}
.SetLabel("test-key0", "new-v0")
.ResetLabel("test-key1")
.SetLabel("test-key3", "v3"));
ASSERT_STATUS_OK(patch);
EXPECT_THAT(patch->labels(), AllOf(Contains(Pair("test-key0", "new-v0")),
Not(Contains(Pair("test-key1", _))),
12 changes: 6 additions & 6 deletions google/cloud/storage/tests/grpc_hmac_key_integration_test.cc
@@ -51,19 +51,19 @@ TEST_F(GrpcHmacKeyMetadataIntegrationTest, HmacKeyCRUD) {

ScopedEnvironment grpc_config("GOOGLE_CLOUD_CPP_STORAGE_GRPC_CONFIG",
"metadata");
auto client = MakeIntegrationTestClient();
auto client = MakeIntegrationTestClient(Options{});

auto get_ids = [&] {
std::vector<std::string> ids;
auto range = client->ListHmacKeys(ServiceAccountFilter(service_account));
auto range = client.ListHmacKeys(ServiceAccountFilter(service_account));
std::transform(range.begin(), range.end(), std::back_inserter(ids),
[](StatusOr<HmacKeyMetadata> x) { return x.value().id(); });
return ids;
};

auto const initial_ids = get_ids();

auto create = client->CreateHmacKey(service_account);
auto create = client.CreateHmacKey(service_account);
ASSERT_STATUS_OK(create);
auto const key = create->second;
auto const metadata = create->first;
@@ -73,18 +73,18 @@ TEST_F(GrpcHmacKeyMetadataIntegrationTest, HmacKeyCRUD) {
auto current_ids = get_ids();
EXPECT_THAT(current_ids, Contains(metadata.id()));

auto get = client->GetHmacKey(metadata.access_id());
auto get = client.GetHmacKey(metadata.access_id());
ASSERT_STATUS_OK(get);
EXPECT_EQ(*get, metadata);

// Before we can delete the HmacKey we need to move it to the inactive state.
auto update = metadata;
update.set_state(HmacKeyMetadata::state_inactive());
auto update_response = client->UpdateHmacKey(update.access_id(), update);
auto update_response = client.UpdateHmacKey(update.access_id(), update);
ASSERT_STATUS_OK(update_response);
EXPECT_EQ(update_response->state(), HmacKeyMetadata::state_inactive());

auto delete_response = client->DeleteHmacKey(get->access_id());
auto delete_response = client.DeleteHmacKey(get->access_id());
ASSERT_STATUS_OK(delete_response);

current_ids = get_ids();
60 changes: 24 additions & 36 deletions google/cloud/storage/tests/grpc_integration_test.cc
@@ -63,30 +63,28 @@ class GrpcIntegrationTest

TEST_P(GrpcIntegrationTest, ObjectCRUD) {
auto bucket_client = MakeBucketIntegrationTestClient();

auto client = MakeIntegrationTestClient();
ASSERT_STATUS_OK(client);

auto client = MakeIntegrationTestClient(Options{});
auto bucket_name = MakeRandomBucketName();
auto object_name = MakeRandomObjectName();

auto bucket_metadata = bucket_client.CreateBucketForProject(
bucket_name, project_id(), BucketMetadata());
ASSERT_STATUS_OK(bucket_metadata);

EXPECT_EQ(bucket_name, bucket_metadata->name());

auto object_metadata = client->InsertObject(
auto object_metadata = client.InsertObject(
bucket_name, object_name, LoremIpsum(), IfGenerationMatch(0));
ASSERT_STATUS_OK(object_metadata);

auto stream = client->ReadObject(bucket_name, object_name);
auto stream = client.ReadObject(bucket_name, object_name);

std::string actual(std::istreambuf_iterator<char>{stream}, {});
EXPECT_EQ(LoremIpsum(), actual);
EXPECT_STATUS_OK(stream.status());

// This is part of the test, not just a cleanup.
auto delete_object_status = client->DeleteObject(
auto delete_object_status = client.DeleteObject(
bucket_name, object_name, Generation(object_metadata->generation()));
EXPECT_STATUS_OK(delete_object_status);

@@ -96,12 +94,10 @@ TEST_P(GrpcIntegrationTest, ObjectCRUD) {

TEST_P(GrpcIntegrationTest, WriteResume) {
auto bucket_client = MakeBucketIntegrationTestClient();

auto client = MakeIntegrationTestClient();
ASSERT_STATUS_OK(client);

auto client = MakeIntegrationTestClient(Options{});
auto bucket_name = MakeRandomBucketName();
auto object_name = MakeRandomObjectName();

auto bucket_metadata = bucket_client.CreateBucketForProject(
bucket_name, project_id(), BucketMetadata());
ASSERT_STATUS_OK(bucket_metadata);
@@ -113,15 +109,15 @@ TEST_P(GrpcIntegrationTest, WriteResume) {
std::string session_id;
{
auto old_os =
client->WriteObject(bucket_name, object_name, IfGenerationMatch(0),
NewResumableUploadSession());
client.WriteObject(bucket_name, object_name, IfGenerationMatch(0),
NewResumableUploadSession());
ASSERT_TRUE(old_os.good()) << "status=" << old_os.metadata().status();
session_id = old_os.resumable_session_id();
std::move(old_os).Suspend();
}

auto os = client->WriteObject(bucket_name, object_name,
RestoreResumableUploadSession(session_id));
auto os = client.WriteObject(bucket_name, object_name,
RestoreResumableUploadSession(session_id));
ASSERT_TRUE(os.good()) << "status=" << os.metadata().status();
EXPECT_EQ(session_id, os.resumable_session_id());
os << LoremIpsum();
@@ -137,7 +133,7 @@ TEST_P(GrpcIntegrationTest, WriteResume) {
EXPECT_EQ("resumable", meta.metadata("x_emulator_upload"));
}

auto status = client->DeleteObject(bucket_name, object_name);
auto status = client.DeleteObject(bucket_name, object_name);
EXPECT_STATUS_OK(status);

auto delete_bucket_status = bucket_client.DeleteBucket(bucket_name);
@@ -146,12 +142,10 @@ TEST_P(GrpcIntegrationTest, WriteResume) {

TEST_P(GrpcIntegrationTest, InsertLarge) {
auto bucket_client = MakeBucketIntegrationTestClient();

auto client = MakeIntegrationTestClient();
ASSERT_STATUS_OK(client);

auto client = MakeIntegrationTestClient(Options{});
auto bucket_name = MakeRandomBucketName();
auto object_name = MakeRandomObjectName();

auto bucket_metadata = bucket_client.CreateBucketForProject(
bucket_name, project_id(), BucketMetadata());
ASSERT_STATUS_OK(bucket_metadata);
@@ -161,8 +155,8 @@ TEST_P(GrpcIntegrationTest, InsertLarge) {
// multiple of 256 KiB.
auto const desired_size = 8 * 1024 * 1024L + 253 * 1024 + 15;
auto data = MakeRandomData(desired_size);
auto metadata = client->InsertObject(bucket_name, object_name, data,
IfGenerationMatch(0));
auto metadata =
client.InsertObject(bucket_name, object_name, data, IfGenerationMatch(0));
ASSERT_STATUS_OK(metadata);
ScheduleForDelete(*metadata);

@@ -171,12 +165,10 @@ TEST_P(GrpcIntegrationTest, InsertLarge) {

TEST_P(GrpcIntegrationTest, StreamLargeChunks) {
auto bucket_client = MakeBucketIntegrationTestClient();

auto client = MakeIntegrationTestClient();
ASSERT_STATUS_OK(client);

auto client = MakeIntegrationTestClient(Options{});
auto bucket_name = MakeRandomBucketName();
auto object_name = MakeRandomObjectName();

auto bucket_metadata = bucket_client.CreateBucketForProject(
bucket_name, project_id(), BucketMetadata());
ASSERT_STATUS_OK(bucket_metadata);
@@ -186,7 +178,7 @@ TEST_P(GrpcIntegrationTest, StreamLargeChunks) {
auto const desired_size = 8 * 1024 * 1024L;
auto data = MakeRandomData(desired_size);
auto stream =
client->WriteObject(bucket_name, object_name, IfGenerationMatch(0));
client.WriteObject(bucket_name, object_name, IfGenerationMatch(0));
stream.write(data.data(), data.size());
EXPECT_TRUE(stream.good());
stream.write(data.data(), data.size());
@@ -200,14 +192,12 @@ TEST_P(GrpcIntegrationTest, StreamLargeChunks) {
}

TEST_P(GrpcIntegrationTest, QuotaUser) {
auto client = MakeIntegrationTestClient();
ASSERT_STATUS_OK(client);

auto client = MakeIntegrationTestClient(Options{});
auto object_name = MakeRandomObjectName();

auto metadata =
client->InsertObject(bucket_name(), object_name, LoremIpsum(),
IfGenerationMatch(0), QuotaUser("test-only"));
client.InsertObject(bucket_name(), object_name, LoremIpsum(),
IfGenerationMatch(0), QuotaUser("test-only"));
ASSERT_STATUS_OK(metadata);
ScheduleForDelete(*metadata);
}
@@ -217,12 +207,10 @@ TEST_P(GrpcIntegrationTest, FieldFilter) {
auto const* fields = UsingGrpc() ? "resource.bucket,resource.name,resource."
"generation,resource.content_type"
: "bucket,name,generation,contentType";
auto client = MakeIntegrationTestClient();
ASSERT_STATUS_OK(client);

auto client = MakeIntegrationTestClient(Options{});
auto object_name = MakeRandomObjectName();

auto metadata = client->InsertObject(
auto metadata = client.InsertObject(
bucket_name(), object_name, LoremIpsum(), IfGenerationMatch(0),
ContentType("text/plain"), ContentEncoding("utf-8"), Fields(fields));
ASSERT_STATUS_OK(metadata);