HBASE-20999 Move hbase-REST to new hbase-connectors repository
ben2077 committed May 27, 2020
1 parent 75b93ec commit d2a47aa
Showing 7 changed files with 396 additions and 66 deletions.
9 changes: 9 additions & 0 deletions hbase-rest/pom.xml
@@ -130,6 +130,10 @@
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-common</artifactId>
</dependency>
+<dependency>
+  <groupId>org.apache.hbase</groupId>
+  <artifactId>hbase-mapreduce</artifactId>
+</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-connectors-protocol-shaded</artifactId>
@@ -265,6 +269,11 @@
<artifactId>kerb-simplekdc</artifactId>
<scope>test</scope>
</dependency>
+<dependency>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-minikdc</artifactId>
+  <scope>test</scope>
+</dependency>
<dependency>
<groupId>org.apache.kerby</groupId>
<artifactId>kerb-core</artifactId>
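Both additions are declared without a <version> element, so the versions are presumably managed in a parent pom's dependencyManagement section that this diff does not show. A minimal sketch of what those managed entries could look like, assuming version properties named hbase.version and hadoop.version (both property names are assumptions, not taken from this commit):

    <dependencyManagement>
      <dependencies>
        <!-- Assumed entry: pins hbase-mapreduce to the HBase release the connectors build targets. -->
        <dependency>
          <groupId>org.apache.hbase</groupId>
          <artifactId>hbase-mapreduce</artifactId>
          <version>${hbase.version}</version>
        </dependency>
        <!-- Assumed entry: pins hadoop-minikdc, which this module uses only for tests. -->
        <dependency>
          <groupId>org.apache.hadoop</groupId>
          <artifactId>hadoop-minikdc</artifactId>
          <version>${hadoop.version}</version>
        </dependency>
      </dependencies>
    </dependencyManagement>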
StorageClusterStatusResource.java
@@ -43,7 +43,7 @@
@InterfaceAudience.Private
public class StorageClusterStatusResource extends ResourceBase {
private static final Logger LOG =
-LoggerFactory.getLogger(StorageClusterStatusResource.class);
+LoggerFactory.getLogger(StorageClusterStatusResource.class);

static CacheControl cacheControl;
static {
@@ -62,15 +62,15 @@ public StorageClusterStatusResource() throws IOException {

@GET
@Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
-MIMETYPE_PROTOBUF_IETF})
+MIMETYPE_PROTOBUF_IETF})
public Response get(final @Context UriInfo uriInfo) {
if (LOG.isTraceEnabled()) {
LOG.trace("GET " + uriInfo.getAbsolutePath());
}
servlet.getMetrics().incrementRequests(1);
try {
ClusterMetrics status = servlet.getAdmin().getClusterMetrics(
-EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS));
+EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS));
StorageClusterStatusModel model = new StorageClusterStatusModel();
model.setRegions(status.getRegionCount());
model.setRequests(status.getRequestCount());
@@ -79,26 +79,25 @@ public Response get(final @Context UriInfo uriInfo) {
ServerName sn = entry.getKey();
ServerMetrics load = entry.getValue();
StorageClusterStatusModel.Node node =
-model.addLiveNode(
-sn.getHostname() + ":" +
-Integer.toString(sn.getPort()),
-sn.getStartcode(), (int) load.getUsedHeapSize().get(Size.Unit.MEGABYTE),
-(int) load.getMaxHeapSize().get(Size.Unit.MEGABYTE));
+model.addLiveNode(
+sn.getHostname() + ":" +
+Integer.toString(sn.getPort()),
+sn.getStartcode(), (int) load.getUsedHeapSize().get(Size.Unit.MEGABYTE),
+(int) load.getMaxHeapSize().get(Size.Unit.MEGABYTE));
node.setRequests(load.getRequestCount());
for (RegionMetrics region: load.getRegionMetrics().values()) {
node.addRegion(region.getRegionName(), region.getStoreCount(),
-region.getStoreFileCount(),
-(int) region.getStoreFileSize().get(Size.Unit.MEGABYTE),
-(int) region.getMemStoreSize().get(Size.Unit.MEGABYTE),
-(long) region.getStoreFileIndexSize().get(Size.Unit.KILOBYTE),
-region.getReadRequestCount(),
-region.getCpRequestCount(),
-region.getWriteRequestCount(),
-(int) region.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE),
-(int) region.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE),
-(int) region.getBloomFilterSize().get(Size.Unit.KILOBYTE),
-region.getCompactingCellCount(),
-region.getCompactedCellCount());
+region.getStoreFileCount(),
+(int) region.getStoreFileSize().get(Size.Unit.MEGABYTE),
+(int) region.getMemStoreSize().get(Size.Unit.MEGABYTE),
+(long) region.getStoreFileIndexSize().get(Size.Unit.KILOBYTE),
+region.getReadRequestCount(),
+region.getWriteRequestCount(),
+(int) region.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE),
+(int) region.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE),
+(int) region.getBloomFilterSize().get(Size.Unit.KILOBYTE),
+region.getCompactingCellCount(),
+region.getCompactedCellCount());
}
}
for (ServerName name: status.getDeadServerNames()) {
@@ -111,8 +110,8 @@ public Response get(final @Context UriInfo uriInfo) {
} catch (IOException e) {
servlet.getMetrics().incrementFailedGetRequests(1);
return Response.status(Response.Status.SERVICE_UNAVAILABLE)
-.type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
-.build();
+.type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
+.build();
}
}
}
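For context, the handler wraps the plain Admin API: it asks only for the live and dead server lists and then copies per-server and per-region metrics into the model. A minimal standalone sketch of the same query, given an already-open Connection (connection setup is not part of this diff):

    import java.io.IOException;
    import java.util.EnumSet;
    import java.util.Map;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.ClusterMetrics.Option;
    import org.apache.hadoop.hbase.ServerMetrics;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    // Issues the same metrics request as the REST handler above: live and dead server lists only.
    static void printClusterStatus(Connection conn) throws IOException {
      try (Admin admin = conn.getAdmin()) {
        ClusterMetrics status =
            admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS));
        for (Map.Entry<ServerName, ServerMetrics> entry : status.getLiveServerMetrics().entrySet()) {
          // Per-server request count, the value the handler copies into node.setRequests(...).
          System.out.println(entry.getKey() + " requests=" + entry.getValue().getRequestCount());
        }
        // Dead servers are reported by name only.
        status.getDeadServerNames().forEach(name -> System.out.println("dead: " + name));
      }
    }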
StorageClusterStatusModel.java
@@ -121,7 +121,6 @@ public static class Region implements Serializable {
private int memstoreSizeMB;
private long storefileIndexSizeKB;
private long readRequestsCount;
-private long cpRequestsCount;
private long writeRequestsCount;
private int rootIndexSizeKB;
private int totalStaticIndexSizeKB;
@@ -154,7 +153,7 @@ public Region(byte[] name) {
*/
public Region(byte[] name, int stores, int storefiles,
int storefileSizeMB, int memstoreSizeMB, long storefileIndexSizeKB,
-long readRequestsCount, long cpRequestsCount, long writeRequestsCount,
+long readRequestsCount, long writeRequestsCount,
int rootIndexSizeKB, int totalStaticIndexSizeKB, int totalStaticBloomSizeKB,
long totalCompactingKVs, long currentCompactedKVs) {
this.name = name;
@@ -164,7 +163,6 @@ public Region(byte[] name, int stores, int storefiles,
this.memstoreSizeMB = memstoreSizeMB;
this.storefileIndexSizeKB = storefileIndexSizeKB;
this.readRequestsCount = readRequestsCount;
-this.cpRequestsCount = cpRequestsCount;
this.writeRequestsCount = writeRequestsCount;
this.rootIndexSizeKB = rootIndexSizeKB;
this.totalStaticIndexSizeKB = totalStaticIndexSizeKB;
@@ -229,13 +227,6 @@ public long getReadRequestsCount() {
return readRequestsCount;
}

-/**
-* @return the current total read requests made to region
-*/
-@XmlAttribute
-public long getCpRequestsCount() {
-return cpRequestsCount;
-}

/**
* @return the current total write requests made to region
@@ -292,13 +283,6 @@ public void setReadRequestsCount(long readRequestsCount) {
this.readRequestsCount = readRequestsCount;
}

-/**
-* @param cpRequestsCount The current total read requests made to region
-*/
-public void setCpRequestsCount(long cpRequestsCount) {
-this.cpRequestsCount = cpRequestsCount;
-}

/**
* @param rootIndexSizeKB The current total size of root-level indexes
* for the region, in KB
@@ -402,11 +386,11 @@ public void setStorefileIndexSizeKB(long storefileIndexSizeKB) {
*/
public void addRegion(byte[] name, int stores, int storefiles,
int storefileSizeMB, int memstoreSizeMB, long storefileIndexSizeKB,
-long readRequestsCount, long cpRequestsCount, long writeRequestsCount,
+long readRequestsCount, long writeRequestsCount,
int rootIndexSizeKB, int totalStaticIndexSizeKB, int totalStaticBloomSizeKB,
long totalCompactingKVs, long currentCompactedKVs) {
regions.add(new Region(name, stores, storefiles, storefileSizeMB,
-memstoreSizeMB, storefileIndexSizeKB, readRequestsCount, cpRequestsCount,
+memstoreSizeMB, storefileIndexSizeKB, readRequestsCount,
writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB,
totalStaticBloomSizeKB, totalCompactingKVs, currentCompactedKVs));
}
@@ -697,7 +681,6 @@ public String toString() {
sb.append("\n readRequestsCount=");
sb.append(region.readRequestsCount);
-sb.append("\n cpRequestsCount=");
-sb.append(region.cpRequestsCount);
sb.append("\n writeRequestsCount=");
sb.append(region.writeRequestsCount);
sb.append("\n rootIndexSizeKB=");
@@ -752,7 +735,6 @@ public byte[] createProtobufOutput() {
regionBuilder.setMemStoreSizeMB(region.memstoreSizeMB);
regionBuilder.setStorefileIndexSizeKB(region.storefileIndexSizeKB);
regionBuilder.setReadRequestsCount(region.readRequestsCount);
-regionBuilder.setCpRequestsCount(region.cpRequestsCount);
regionBuilder.setWriteRequestsCount(region.writeRequestsCount);
regionBuilder.setRootIndexSizeKB(region.rootIndexSizeKB);
regionBuilder.setTotalStaticIndexSizeKB(region.totalStaticIndexSizeKB);
@@ -798,7 +780,6 @@ public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException {
region.getMemStoreSizeMB(),
region.getStorefileIndexSizeKB(),
region.getReadRequestsCount(),
-region.getCpRequestsCount(),
region.getWriteRequestsCount(),
region.getRootIndexSizeKB(),
region.getTotalStaticIndexSizeKB(),
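With cpRequestsCount removed, the Region constructor and Node.addRegion(...) take thirteen arguments instead of fourteen, and the attribute disappears from the model's XML and protobuf output. A small sketch of building the model against the post-change signature (StorageClusterStatusModel lives in org.apache.hadoop.hbase.rest.model, Bytes in org.apache.hadoop.hbase.util); the host name, start code and counter values are placeholders, not data from this commit:

    StorageClusterStatusModel model = new StorageClusterStatusModel();
    model.setRegions(1);
    model.setRequests(150);
    StorageClusterStatusModel.Node node =
        model.addLiveNode("rs1.example.com:16020", 1590000000000L, 128, 1024);
    node.setRequests(150);
    node.addRegion(Bytes.toBytes("t1,,1590000000000.0123456789abcdef."),
        1,    // stores
        2,    // storefiles
        10,   // storefileSizeMB
        4,    // memstoreSizeMB
        1L,   // storefileIndexSizeKB
        100L, // readRequestsCount (the removed cpRequestsCount argument is no longer passed here)
        50L,  // writeRequestsCount
        1,    // rootIndexSizeKB
        1,    // totalStaticIndexSizeKB
        1,    // totalStaticBloomSizeKB
        0L,   // totalCompactingKVs
        0L);  // currentCompactedKVs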
@@ -45,7 +45,7 @@
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;
+import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;
@@ -174,7 +174,7 @@ public static void tearDownAfterClass() throws Exception {
}

private static void createLabels() throws IOException, InterruptedException {
-PrivilegedExceptionAction<VisibilityLabelsResponse> action = () -> {
+PrivilegedExceptionAction<VisibilityLabelsProtos.VisibilityLabelsResponse> action = () -> {
String[] labels = { SECRET, CONFIDENTIAL, PRIVATE, PUBLIC, TOPSECRET };
try (Connection conn = ConnectionFactory.createConnection(conf)) {
VisibilityClient.addLabels(conn, labels);
@@ -46,7 +46,7 @@
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.http.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.hbase.rest.http.KeyStoreTestUtil;
import org.apache.hadoop.hbase.rest.model.CellModel;
import org.apache.hadoop.hbase.rest.model.CellSetModel;
import org.apache.hadoop.hbase.rest.model.RowModel;