
Commit

Merge branch 'main' into 3562-tablet-client-loadtable-should-show-error-msg
vagetablechicken authored Nov 15, 2023
2 parents cceb677 + 825d155 commit 55796e2
Showing 170 changed files with 6,898 additions and 3,970 deletions.
2 changes: 2 additions & 0 deletions .github/workflows/cicd.yaml
@@ -107,9 +107,11 @@ jobs:
        uses: actions/upload-artifact@v2
        with:
          name: linux-ut-result-cpp-${{ github.sha }}
          # exclude _deps xml
          path: |
            build/**/*.xml
            reports/*.xml
            !build/_deps/*
      - name: install
        if: ${{ github.event_name == 'push' }}
1 change: 1 addition & 0 deletions .github/workflows/sdk.yml
@@ -352,6 +352,7 @@ jobs:
      image: ghcr.io/4paradigm/hybridsql:latest
    env:
      OPENMLDB_BUILD_TARGET: "openmldb"
      OPENMLDB_MODE: standalone
    steps:
      - uses: actions/checkout@v2

1 change: 1 addition & 0 deletions CMakeLists.txt
@@ -136,6 +136,7 @@ endif()
include(FetchContent)
set(FETCHCONTENT_QUIET OFF)
include(farmhash)
include(rapidjson)

# contrib libs
add_subdirectory(contrib EXCLUDE_FROM_ALL)
4 changes: 2 additions & 2 deletions benchmark/pom.xml
@@ -27,12 +27,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
        <dependency>
            <groupId>com.4paradigm.openmldb</groupId>
            <artifactId>openmldb-jdbc</artifactId>
            <version>0.7.0</version>
            <version>0.8.3</version>
        </dependency>
        <dependency>
            <groupId>com.4paradigm.openmldb</groupId>
            <artifactId>openmldb-native</artifactId>
            <version>0.7.0-allinone</version>
            <version>0.8.3-allinone</version>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
BenchmarkConfig.java
@@ -34,6 +34,7 @@ public class BenchmarkConfig {
    public static long TS_BASE = System.currentTimeMillis();
    public static String DEPLOY_NAME;
    public static String CSV_PATH;
    public static int PUT_BACH_SIZE = 1;

    private static SqlExecutor executor = null;
    private static SdkOption option = null;
@@ -58,6 +59,7 @@ public class BenchmarkConfig {
            // if(!CSV_PATH.startsWith("/")){
            // CSV_PATH=Util.getRootPath()+CSV_PATH;
            // }
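            // Number of rows per insert batch for the put benchmark; defaults to 1 when the property is absent.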
            PUT_BACH_SIZE = Integer.valueOf(prop.getProperty("PUT_BACH_SIZE", "1"));
        } catch (Exception e) {
            e.printStackTrace();
        }
OpenMLDBInsertBenchmark.java (new file)
@@ -0,0 +1,131 @@
package com._4paradigm.openmldb.benchmark;

import com._4paradigm.openmldb.sdk.SqlExecutor;
import org.openjdk.jmh.annotations.*;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;

import java.sql.Timestamp;
import java.util.Random;
import java.util.concurrent.TimeUnit;

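// JMH benchmark measuring OpenMLDB insert latency: 10 threads write rows of 15 string,
// 5 double, 5 timestamp, and 5 bigint columns through a prepared INSERT statement.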
@BenchmarkMode(Mode.SampleTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@State(Scope.Benchmark)
@Threads(10)
@Fork(value = 1, jvmArgs = {"-Xms8G", "-Xmx8G"})
@Warmup(iterations = 2)
@Measurement(iterations = 5, time = 60)

public class OpenMLDBInsertBenchmark {
    private SqlExecutor executor;
    private String database = "test_put_db";
    private String tableName = "test_put_t1";
    private int indexNum;
    private String placeholderSQL;
    private Random random;
    int stringNum = 15;
    int doubleNum = 5;
    int timestampNum = 5;
    int bigintNum = 5;

    public OpenMLDBInsertBenchmark() {
        executor = BenchmarkConfig.GetSqlExecutor(false);
        indexNum = BenchmarkConfig.WINDOW_NUM;
        random = new Random();
        StringBuilder builder = new StringBuilder();
        builder.append("insert into ");
        builder.append(tableName);
        builder.append(" values (");
        for (int i = 0; i < stringNum + doubleNum + timestampNum + bigintNum; i++) {
            if (i > 0) {
                builder.append(", ");
            }
            builder.append("?");
        }
        builder.append(");");
        placeholderSQL = builder.toString();
    }

    @Setup
    public void initEnv() {
        Util.executeSQL("CREATE DATABASE IF NOT EXISTS " + database + ";", executor);
        Util.executeSQL("USE " + database + ";", executor);
        String ddl = Util.genDDL(tableName, indexNum);
        Util.executeSQL(ddl, executor);
    }

    @Benchmark
    public void executePut() {
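        // Build PUT_BACH_SIZE rows of random column values and write them through the prepared INSERT.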
        java.sql.PreparedStatement pstmt = null;
        try {
            pstmt = executor.getInsertPreparedStmt(database, placeholderSQL);
            for (int num = 0; num < BenchmarkConfig.PUT_BACH_SIZE; num++) {
                int idx = 1;
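                // The first indexNum string columns are index keys drawn from the configured PK range;
                // the remaining string columns get random payload values.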
                for (int i = 0; i < stringNum; i++) {
                    if (i < indexNum) {
                        pstmt.setString(idx, String.valueOf(BenchmarkConfig.PK_BASE + random.nextInt(BenchmarkConfig.PK_NUM)));
                    } else {
                        pstmt.setString(idx, "v" + String.valueOf(100000 + random.nextInt(100000)));
                    }
                    idx++;
                }
                for (int i = 0; i < doubleNum; i++) {
                    pstmt.setDouble(idx, random.nextDouble());
                    idx++;
                }
                for (int i = 0; i < timestampNum; i++) {
                    pstmt.setTimestamp(idx, new Timestamp(System.currentTimeMillis()));
                    idx++;
                }
                for (int i = 0; i < bigintNum; i++) {
                    pstmt.setLong(idx, random.nextLong());
                    idx++;
                }
                if (BenchmarkConfig.PUT_BACH_SIZE > 1) {
                    pstmt.addBatch();
                }
            }
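            // Flush the accumulated JDBC batch when batching, otherwise execute the single prepared row.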
            if (BenchmarkConfig.PUT_BACH_SIZE > 1) {
                pstmt.executeBatch();
            } else {
                pstmt.execute();
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (pstmt != null) {
                try {
                    pstmt.close();
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }
    }

    @TearDown
    public void cleanEnv() {
        Util.executeSQL("USE " + database + ";", executor);
        Util.executeSQL("DROP TABLE " + tableName + ";", executor);
        Util.executeSQL("DROP DATABASE " + database + ";", executor);
    }

    public static void main(String[] args) {
        /* OpenMLDBPutBenchmark benchmark = new OpenMLDBPutBenchmark();
        benchmark.initEnv();
        benchmark.executePut();
        benchmark.cleanEnv();*/

        try {
            Options opt = new OptionsBuilder()
                    .include(OpenMLDBInsertBenchmark.class.getSimpleName())
                    .forks(1)
                    .build();
            new Runner(opt).run();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
6 changes: 4 additions & 2 deletions benchmark/src/main/resources/conf.properties
@@ -1,5 +1,5 @@
ZK_CLUSTER=172.24.4.55:30008
ZK_PATH=/openmldb
ZK_CLUSTER=172.24.4.55:32200
ZK_PATH=/openmldb_test

WINDOW_NUM=2
WINDOW_SIZE=1000
@@ -12,3 +12,5 @@ PK_BASE=1000000
DATABASE=bank_perf
DEPLOY_NAME=deploy_bank
CSV_PATH=data/bank_flattenRequest.csv

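# Number of rows added to each prepared-statement batch by the insert benchmark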
PUT_BACH_SIZE=100
37 changes: 37 additions & 0 deletions cases/plan/create.yaml
@@ -1035,3 +1035,40 @@ cases:
+-kind: HIVE
+-path: hdfs://path
+-table_option_list: []
- id: 34
desc: Create with compression type specified
sql: |
create table t1(
column1 int,
column2 timestamp,
index(key=column1, ts=column2)) OPTIONS (compress_type="snappy");
expect:
node_tree_str: |
+-node[CREATE]
+-table: t1
+-IF NOT EXIST: 0
+-column_desc_list[list]:
| +-0:
| | +-node[kColumnDesc]
| | +-column_name: column1
| | +-column_type: int32
| | +-NOT NULL: 0
| +-1:
| | +-node[kColumnDesc]
| | +-column_name: column2
| | +-column_type: timestamp
| | +-NOT NULL: 0
| +-2:
| +-node[kColumnIndex]
| +-keys: [column1]
| +-ts_col: column2
| +-abs_ttl: -2
| +-lat_ttl: -2
| +-ttl_type: <nil>
| +-version_column: <nil>
| +-version_count: 0
+-table_option_list[list]:
+-0:
+-node[kCompressType]
+-compress_type: snappy
71 changes: 67 additions & 4 deletions cases/plan/join_query.yaml
@@ -18,20 +18,83 @@ cases:
sql: SELECT t1.COL1, t1.COL2, t2.COL1, t2.COL2 FROM t1 full join t2 on t1.col1 = t2.col2;
mode: physical-plan-unsupport
- id: 2
mode: request-unsupport
desc: Simple SELECT LEFT JOIN
mode: runner-unsupport
sql: SELECT t1.COL1, t1.COL2, t2.COL1, t2.COL2 FROM t1 left join t2 on t1.col1 = t2.col2;
expect:
node_tree_str: |
+-node[kQuery]: kQuerySelect
+-distinct_opt: false
+-where_expr: null
+-group_expr_list: null
+-having_expr: null
+-order_expr_list: null
+-limit: null
+-select_list[list]:
| +-0:
| | +-node[kResTarget]
| | +-val:
| | | +-expr[column ref]
| | | +-relation_name: t1
| | | +-column_name: COL1
| | +-name: <nil>
| +-1:
| | +-node[kResTarget]
| | +-val:
| | | +-expr[column ref]
| | | +-relation_name: t1
| | | +-column_name: COL2
| | +-name: <nil>
| +-2:
| | +-node[kResTarget]
| | +-val:
| | | +-expr[column ref]
| | | +-relation_name: t2
| | | +-column_name: COL1
| | +-name: <nil>
| +-3:
| +-node[kResTarget]
| +-val:
| | +-expr[column ref]
| | +-relation_name: t2
| | +-column_name: COL2
| +-name: <nil>
+-tableref_list[list]:
| +-0:
| +-node[kTableRef]: kJoin
| +-join_type: LeftJoin
| +-left:
| | +-node[kTableRef]: kTable
| | +-table: t1
| | +-alias: <nil>
| +-right:
| +-node[kTableRef]: kTable
| +-table: t2
| +-alias: <nil>
| +-order_expressions: null
| +-on:
| +-expr[binary]
| +-=[list]:
| +-0:
| | +-expr[column ref]
| | +-relation_name: t1
| | +-column_name: col1
| +-1:
| +-expr[column ref]
| +-relation_name: t2
| +-column_name: col2
+-window_list: []
- id: 3
desc: Simple SELECT LAST JOIN
sql: SELECT t1.COL1, t1.COL2, t2.COL1, t2.COL2 FROM t1 last join t2 order by t2.col5 on t1.col1 = t2.col2;
- id: 4
desc: Simple SELECT RIGHT JOIN
sql: SELECT t1.COL1, t1.COL2, t2.COL1, t2.COL2 FROM t1 right join t2 on t1.col1 = t2.col2;
mode: runner-unsupport
mode: physical-plan-unsupport
- id: 5
desc: LeftJoin with an inequality condition
sql: SELECT t1.col1 as t1_col1, t2.col2 as t2_col2 FROM t1 left join t2 on t1.col1 = t2.col2 and t2.col5 >= t1.col5;
mode: runner-unsupport
mode: request-unsupport
- id: 6
desc: LastJoin with an inequality condition
sql: SELECT t1.col1 as t1_col1, t2.col2 as t2_col2 FROM t1 last join t2 order by t2.col5 on t1.col1 = t2.col2 and t2.col5 >= t1.col5;
@@ -162,4 +225,4 @@ cases:
col1 as id,
sum(col2) OVER w2 as w2_col2_sum FROM t1 WINDOW
w2 AS (PARTITION BY col1 ORDER BY col5 ROWS_RANGE BETWEEN 1d OPEN PRECEDING AND CURRENT ROW)
) as out1 ON out0.id = out1.id;
) as out1 ON out0.id = out1.id;
21 changes: 21 additions & 0 deletions cases/query/fail_query.yaml
@@ -49,3 +49,24 @@ cases:
SELECT 100 + 1s;
expect:
success: false
- id: 3
desc: unsupport join
inputs:
- name: t1
columns: ["c1 string","c2 int","c4 timestamp"]
indexs: ["index1:c1:c4"]
rows:
- ["aa",20,1000]
- ["bb",30,1000]
- name: t2
columns: ["c2 int","c4 timestamp"]
indexs: ["index1:c2:c4"]
rows:
- [20,3000]
- [20,2000]
sql: |
select t1.c1 as id, t2.* from t1 right join t2
on t1.c2 = t2.c2
expect:
success: false
msg: unsupport join type RightJoin