From 9cdafba26eb8cd5a8336de90b85ee1fb6f12cb1f Mon Sep 17 00:00:00 2001 From: knonomura Date: Fri, 9 Feb 2024 19:11:35 +0900 Subject: [PATCH 1/5] Update for V5.5 --- .../common/data/MetaContainerFileIO.java | 59 ++++++---- .../gs/tools/common/data/ToolConstants.java | 10 +- .../tools/common/data/ToolContainerInfo.java | 104 +++++++++++++----- 3 files changed, 123 insertions(+), 50 deletions(-) diff --git a/common/src/com/toshiba/mwcloud/gs/tools/common/data/MetaContainerFileIO.java b/common/src/com/toshiba/mwcloud/gs/tools/common/data/MetaContainerFileIO.java index 8da5d85..ff8cd90 100644 --- a/common/src/com/toshiba/mwcloud/gs/tools/common/data/MetaContainerFileIO.java +++ b/common/src/com/toshiba/mwcloud/gs/tools/common/data/MetaContainerFileIO.java @@ -549,10 +549,15 @@ private ToolContainerInfo readMetaFile(String containerName, String dbName) thro break; case VALUE_NUMBER: if ( isContainer ){ + Integer number = jp.getInt(); if (key.equalsIgnoreCase(ToolConstants.JSON_META_PARTITION_NO)) { ci.setPartitionNo(jp.getInt()); } else if (key.equalsIgnoreCase(ToolConstants.JSON_META_EXPIRATION_TIME)) { expirationTime = jp.getInt(); + } else if (key.equalsIgnoreCase(ToolConstants.JSON_META_INTERVAL_WORKER_GROUP)) { + ci.setIntervalWorkerGroup(number); + } else if (key.equalsIgnoreCase(ToolConstants.JSON_META_INTERVAL_WORKER_GROUP_POSITION)) { + ci.setIntervalWorkerGroupPos(number); } } break; @@ -775,7 +780,7 @@ private void readColumnSet(JsonParser jp, ToolContainerInfo ci) throws GridStore } ColumnInfo columnInfo = new ColumnInfo(columnName, columnType, nullable, null); - if(precision != null && columnType == GSType.TIMESTAMP) { + if (precision != null && columnType == GSType.TIMESTAMP) { ColumnInfo.Builder builder = new ColumnInfo.Builder(columnInfo); builder.setTimePrecision(precision); ColumnInfo swap = builder.toInfo(); @@ -1789,6 +1794,13 @@ private JsonGenerator buildJson(ToolContainerInfo cInfo, JsonGenerator gen) thro gen.writeEnd(); } + if (cInfo.getIntervalWorkerGroup() != null) { + gen.write(ToolConstants.JSON_META_INTERVAL_WORKER_GROUP, cInfo.getIntervalWorkerGroup()); + } + if (cInfo.getIntervalWorkerGroupPos() != null) { + gen.write(ToolConstants.JSON_META_INTERVAL_WORKER_GROUP_POSITION, cInfo.getIntervalWorkerGroupPos()); + } + // V4.3 複合キー対応 rowKeySet追加 gen.writeStartArray(ToolConstants.JSON_META_ROWKEY_SET); List rowKeyColumnList = cInfo.getRowKeyColumnList(); @@ -2032,11 +2044,9 @@ public static GSType convertStringToColumnType(String type) throws GridStoreComm return GSType.TIMESTAMP_ARRAY; } else if (type.equalsIgnoreCase(ToolConstants.COLUMN_TYPE_BOOL)) { return GSType.BOOL; - } else if(type.equalsIgnoreCase(ToolConstants.COLUMN_TYPE_TIMESTAMP_MILI)) { - return GSType.TIMESTAMP; - } else if(type.equalsIgnoreCase(ToolConstants.COLUMN_TYPE_TIMESTAMP_MICRO)) { - return GSType.TIMESTAMP; - } else if(type.equalsIgnoreCase(ToolConstants.COLUMN_TYPE_TIMESTAMP_NANO)) { + } else if (type.equalsIgnoreCase(ToolConstants.COLUMN_TYPE_TIMESTAMP_MILI) + || type.equalsIgnoreCase(ToolConstants.COLUMN_TYPE_TIMESTAMP_MICRO) + || type.equalsIgnoreCase(ToolConstants.COLUMN_TYPE_TIMESTAMP_NANO)) { return GSType.TIMESTAMP; } return GSType.valueOf(type.toUpperCase().trim()); @@ -2044,13 +2054,14 @@ public static GSType convertStringToColumnType(String type) throws GridStoreComm } catch (Exception e) { // "カラム種別の解析処理でエラーが発生しました" //throw new GridStoreCommandException(messageResource.getString("MESS_COMM_ERR_METAINFO_13")+ ": type=[" - throw new GridStoreCommandException("Error occurded in convert 
to type"+ ": type=[" + throw new GridStoreCommandException("Error occurred when converting to type"+ ": type=[" +type+"] msg=[" + e.getMessage()+"]", e); } } /** * Convert string to value of TimeUnit type + * * @param unit the unit of precision * @return TimeUnit value * @throws GridStoreCommandException @@ -2059,7 +2070,7 @@ public static TimeUnit convertStringToTimeUnit(String unit) throws GridStoreComm try { return TimeUnit.valueOf(unit.toUpperCase().trim()); } catch (Exception e) { - throw new GridStoreCommandException("Error occurded in converting to time unit" + throw new GridStoreCommandException("Error occurred when converting to time unit" + ": unit=[" + unit + "] msg=[" + e.getMessage() + "]", e); } } @@ -2069,20 +2080,21 @@ public static TimeUnit convertStringToTimeUnit(String unit) throws GridStoreComm * TIMESTAMP(3) -> MILLISECOND * TIMESTAMP(6) -> MICROSECOND * TIMESTAMP(9) -> NANOSECOND + * * @param preciseTimestampType the precise timestamp type string * @return TimeUnit * @throws GridStoreCommandException */ public static TimeUnit convertTimestampStringToTimeUnit(String preciseTimestampType) throws GridStoreCommandException { String type = preciseTimestampType.trim(); - if(type.equalsIgnoreCase(ToolConstants.COLUMN_TYPE_TIMESTAMP_MILI)) { + if (type.equalsIgnoreCase(ToolConstants.COLUMN_TYPE_TIMESTAMP_MILI)) { return TimeUnit.MILLISECOND; - } else if(type.equalsIgnoreCase(ToolConstants.COLUMN_TYPE_TIMESTAMP_MICRO)) { + } else if (type.equalsIgnoreCase(ToolConstants.COLUMN_TYPE_TIMESTAMP_MICRO)) { return TimeUnit.MICROSECOND; - } else if(type.equalsIgnoreCase(ToolConstants.COLUMN_TYPE_TIMESTAMP_NANO)) { + } else if (type.equalsIgnoreCase(ToolConstants.COLUMN_TYPE_TIMESTAMP_NANO)) { return TimeUnit.NANOSECOND; } else { - throw new GridStoreCommandException("Error occurded in convert to type" + throw new GridStoreCommandException("Error occurred when converting to type" + ": type=[" + type + "] msg=[Not a precise timestamp type]"); } } @@ -2092,8 +2104,9 @@ public static TimeUnit convertTimestampStringToTimeUnit(String preciseTimestampT * TimeUnit.MILLISECOND -> TIMESTAMP(3) * TimeUnit.MICROSECOND -> TIMESTAMP(6) * TimeUnit.NANOSECOND -> TIMESTAMP(9) - * @param timeUnit - * @return String of TIMESTAMP with number + * + * @param timeUnit The precision time unit of timestamp + * @return String of TIMESTAMP type with numbered suffix * @throws GridStoreCommandException */ public static String convertTimeunitToTimestampType(TimeUnit timeUnit) throws GridStoreCommandException { @@ -2105,7 +2118,7 @@ public static String convertTimeunitToTimestampType(TimeUnit timeUnit) throws Gr case NANOSECOND : return ToolConstants.COLUMN_TYPE_TIMESTAMP_NANO.toUpperCase(); default : - throw new GridStoreCommandException("Error occurded in convert time unit" + throw new GridStoreCommandException("Error occurred when converting time unit" + ": type=[" + timeUnit.name() + "] msg=[Not a time unit]"); } } @@ -2113,6 +2126,7 @@ public static String convertTimeunitToTimestampType(TimeUnit timeUnit) throws Gr /** * Check if a string is TIMESTAMP type has suffix * The valid string is "TIMESTAMP(3)", "TIMESTAMP(6)", and "TIMESTAMP(9)" + * * @param timestampTypeHasSuffix the TIMESTAMP type has suffix * @return true if the given string is timestamp type with number */ @@ -2124,9 +2138,10 @@ public static boolean isTimestampStringInSeconds(String timestampTypeHasSuffix) } /** - * Check if timeunit is MILLISECOND or MICROSECOND or NANOSECOND - * @param timeUnit - * @return true if time unit is MILLISECOND or 
MICROSECOND or NANOSECOND + * Check if time unit is MILLISECOND, MICROSECOND or NANOSECOND + * + * @param timeUnit the precision time unit of timestamp + * @return true if time unit is MILLISECOND, MICROSECOND or NANOSECOND */ public static boolean isTimestampUnit(TimeUnit timeUnit) { return TimeUnit.MILLISECOND == timeUnit @@ -2136,7 +2151,8 @@ public static boolean isTimestampUnit(TimeUnit timeUnit) { /** * Check if a column is precise timestamp - * @param columnInfo + * + * @param columnInfo GridStore column information * @return true if the given column is precise timestamp */ public static boolean isPreciseColumn(ColumnInfo columnInfo) { @@ -2146,7 +2162,8 @@ public static boolean isPreciseColumn(ColumnInfo columnInfo) { /** * Get the DateTimeFormatter base on time unit - * @param timePrecision + * + * @param timeUnit The precision time unit of timestamp * @return date time format of time unit */ public static DateTimeFormatter getDateTimeFormatter(TimeUnit timeUnit) { @@ -2202,7 +2219,7 @@ private String convertColumnType(GSType type) throws GridStoreCommandException { } catch (Exception e) { // "カラム種別の変換処理でエラーが発生しました" //throw new GridStoreCommandException(messageResource.getString("MESS_COMM_ERR_METAINFO_30")+ ": type=[" - throw new GridStoreCommandException("Error occurded in convert to type"+ ": type=[" + throw new GridStoreCommandException("Error occurred when converting to type"+ ": type=[" +type+"] msg=[" + e.getMessage()+"]", e); } } diff --git a/common/src/com/toshiba/mwcloud/gs/tools/common/data/ToolConstants.java b/common/src/com/toshiba/mwcloud/gs/tools/common/data/ToolConstants.java index b90ee5e..7883bca 100644 --- a/common/src/com/toshiba/mwcloud/gs/tools/common/data/ToolConstants.java +++ b/common/src/com/toshiba/mwcloud/gs/tools/common/data/ToolConstants.java @@ -17,7 +17,7 @@ public class ToolConstants { /** メタ情報ファイルフォーマットのバージョン */ - public static String META_FILE_VERSION = "5.3.0"; + public static String META_FILE_VERSION = "5.5.0"; /** ロウファイルのタイプ(CSV/バイナリ) */ @@ -78,6 +78,8 @@ public static enum RowFileType { CSV, BINARY, AVRO, ARCHIVE_CSV } public static final String JSON_META_EXPIRED_TIME = "expiredTime"; public static final String JSON_META_ERASABLE_TIME = "erasableTime"; public static final String JSON_META_SCHEMA_INFORMATION = "schemaInformation"; + public static final String JSON_META_INTERVAL_WORKER_GROUP = "intervalWorkerGroup"; + public static final String JSON_META_INTERVAL_WORKER_GROUP_POSITION = "intervalWorkerGroupPosition"; public static final String[] JSON_META_GROUP_CONTAINER ={ JSON_META_DBNAME, JSON_META_CONTAINER, JSON_META_CONTAINER_TYPE, JSON_META_CONTAINER_ATTRIBUTE, @@ -86,6 +88,7 @@ public static enum RowFileType { CSV, BINARY, AVRO, ARCHIVE_CSV } JSON_META_EXPIRATION_TYPE, JSON_META_EXPIRATION_TIME, JSON_META_EXPIRATION_TIME_UNIT, JSON_META_ARCHIVE_INFO, JSON_META_NODE_ADDR, JSON_META_NODE_PORT, JSON_META_DATABASE_ID, JSON_META_CONTAINER_ID, JSON_META_DATAPARTITION_ID, + JSON_META_INTERVAL_WORKER_GROUP, JSON_META_INTERVAL_WORKER_GROUP_POSITION, JSON_META_ROW_INDEX_OID, JSON_META_MVCC_INDEX_OID, JSON_META_INIT_SCHEMA_STATUS, JSON_META_SCHEMA_VERSION, JSON_META_START_TIME, JSON_META_END_TIME, JSON_META_EXPIRED_TIME, JSON_META_ERASABLE_TIME, JSON_META_SCHEMA_INFORMATION @@ -293,6 +296,9 @@ public static enum RowFileType { CSV, BINARY, AVRO, ARCHIVE_CSV } /** SQL処理一覧テーブルメタテーブル取得 */ // WHERE START_TIME < now() public static final String STMT_SELECT_SQL_INFO_PAST = " WHERE START_TIME < now()"; + /** select table has interval worker group **/ + 
public static final String STMT_SELECT_META_TABLES_INTERVAL_WORKER_GROUP = "SELECT TABLE_NAME," + ToolConstants.META_TABLES_INTERVAL_WORKER_GROUP + "," + ToolConstants.META_TABLES_INTERVAL_WORKER_GROUP_POS + " FROM \"" + META_TABLES + "\" WHERE PARTITION_TYPE = 'INTERVAL' AND ((" + ToolConstants.META_TABLES_INTERVAL_WORKER_GROUP + " IS NOT NULL) OR (" + ToolConstants.META_TABLES_INTERVAL_WORKER_GROUP_POS + " IS NOT NULL))"; + /* * プリペアードステートメント */ @@ -394,6 +400,8 @@ public static enum RowFileType { CSV, BINARY, AVRO, ARCHIVE_CSV } public static final String META_TABLES_SUBPARTITION_INTERVAL_VALUE = "SUBPARTITION_INTERVAL_VALUE"; public static final String META_TABLES_SUBPARTITION_INTERVAL_UNIT = "SUBPARTITION_INTERVAL_UNIT"; public static final String META_TABLES_SUBPARTITION_DIVISION_COUNT = "SUBPARTITION_DIVISION_COUNT"; + public static final String META_TABLES_INTERVAL_WORKER_GROUP = "PARTITION_INTERVAL_WORKER_GROUP"; + public static final String META_TABLES_INTERVAL_WORKER_GROUP_POS = "PARTITION_INTERVAL_WORKER_GROUP_POSITION"; // public static final String META_TABLES_CLUSTER_PARTITION_INDEX = "CLUSTER_PARTITION_INDEX"; /* diff --git a/common/src/com/toshiba/mwcloud/gs/tools/common/data/ToolContainerInfo.java b/common/src/com/toshiba/mwcloud/gs/tools/common/data/ToolContainerInfo.java index 5ac0cdc..1147868 100644 --- a/common/src/com/toshiba/mwcloud/gs/tools/common/data/ToolContainerInfo.java +++ b/common/src/com/toshiba/mwcloud/gs/tools/common/data/ToolContainerInfo.java @@ -163,6 +163,12 @@ public class ToolContainerInfo { */ private List JSONrowKeySetStringValueList = null; + /** + * V5.4 [expimp] Handling data deviation + */ + private Integer intervalWorkerGroup = null; + private Integer intervalWorkerGroupPos = null; + /** * コンストラクタ */ @@ -1100,6 +1106,16 @@ public String buildCreateTableStatement() { //WITH (プロパティキー=プロパティ値, ...)"の形式で指定することができます。 // (V4.0) ロウ期限解放 // (V4.1) パーティション期限解放 + // (V5.4) add interval worker group to handling data deviation + ArrayList withList = new ArrayList(); + String iwgPropertiesStr = this.getIntervalWorkerGroupPropertiesString(); + if (iwgPropertiesStr != null && iwgPropertiesStr.length() > 0) { + withList.add(iwgPropertiesStr); + } + if (this.getDataAffinity() != null) { + withList.add("DATA_AFFINITY='" + this.getDataAffinity() + "'"); + } + if (this.hasAdditionalProperty()) { StringBuilder timePropertyStr = new StringBuilder(); boolean additional = false; @@ -1127,13 +1143,8 @@ public String buildCreateTableStatement() { } if ( additional == true ){ // (V4.3.1) CREATE TABLEにてデータアフィニティを設定 - builder.append("WITH("); - if (this.getDataAffinity() != null) { - builder.append("DATA_AFFINITY='" + this.getDataAffinity() + "',"); - } - builder.append("expiration_type='ROW',"); - builder.append(timePropertyStr); - builder.append(")"); + withList.add("expiration_type='ROW'"); + withList.add(timePropertyStr.toString()); } } @@ -1142,31 +1153,15 @@ public String buildCreateTableStatement() { if (additional != true && this.getExpirationInfo() != null) { ExpirationInfo expInfo = this.getExpirationInfo(); // (V4.3.1) CREATE TABLEにてデータアフィニティを設定 - builder.append("WITH("); - if (this.getDataAffinity() != null) { - builder.append("DATA_AFFINITY='" + this.getDataAffinity() + "',"); - } - builder.append(String.format("expiration_type='%s', expiration_time=%d, expiration_time_unit='%s') ", + withList.add(String.format("expiration_type='%s', expiration_time=%d, expiration_time_unit='%s'", expInfo.getType(), expInfo.getTime(), expInfo.getTimeUnit().toString())); - } 
else if (additional != true && this.getExpirationInfo() == null) { - // 時系列オプションを保持しているが期限解放情報を保持していない場合(CREATE TABLEにてexpiration_typeを指定しない場合) - // (V4.3.1) データアフィニティの情報がある場合、CREATE TABLEにてデータアフィニティを設定 - if (this.getDataAffinity() != null) { - builder.append("WITH("); - builder.append("DATA_AFFINITY='" + this.getDataAffinity()+ "'"); - builder.append(")"); - } - } - } else { - // 時系列オプションを保持していない AND 期限解放情報を保持していない場合(CREATE TABLEにてexpiration_typeを指定しない場合) - // (V4.3.1) データアフィニティの情報がある場合、CREATE TABLEにてデータアフィニティを設定 - if (this.getDataAffinity() != null) { - builder.append("WITH("); - builder.append("DATA_AFFINITY='" + this.getDataAffinity()+ "'"); - builder.append(")"); } } + if (withList.size() > 0) { + builder.append(" WITH (").append(String.join(",", withList)).append(") "); + } + // テーブルパーティショニング if (this.isPartitioned()) { for (int i = 0; i < this.getTablePartitionProperties().size(); i++) { @@ -2480,4 +2475,57 @@ public boolean checkExpImpSetting(){ return true; } + + /** + * Get Interval Worker Group + * + * @return Interval Worker Group value + */ + public Integer getIntervalWorkerGroup(){ + return intervalWorkerGroup; + } + + /** + * Get Interval Worker Group position + * + * @return Interval Worker Group position value + */ + public Integer getIntervalWorkerGroupPos(){ + return this.intervalWorkerGroupPos; + } + + /** + * Set Interval Worker Group + * + * @param value the value to set + */ + public void setIntervalWorkerGroup(Integer value){ + this.intervalWorkerGroup = value; + } + + /** + * Set Interval Worker Group position + * + * @param value the value to set + */ + public void setIntervalWorkerGroupPos(Integer value){ + this.intervalWorkerGroupPos = value; + } + + /** + * Get Interval Worker Group properties list + * + * @return the comma separated properties list in a string + */ + public String getIntervalWorkerGroupPropertiesString() { + ArrayList propertiesList = new ArrayList(); + if (this.intervalWorkerGroup != null) { + propertiesList.add("interval_worker_group=" + this.intervalWorkerGroup + ""); + } + if (this.intervalWorkerGroupPos != null) { + propertiesList.add("interval_worker_group_position=" + this.intervalWorkerGroupPos + ""); + } + return String.join(", ", propertiesList); + } + } From 1d0c5b8fa41915a69a37a28e12fed2ad0727edee Mon Sep 17 00:00:00 2001 From: knonomura Date: Fri, 9 Feb 2024 19:12:29 +0900 Subject: [PATCH 2/5] Update for V5.5 --- .../mwcloud/gs/tools/expimp/cmdAnalyze.java | 30 ++++++- .../gs/tools/expimp/commandLineInfo.java | 20 +++++ .../gs/tools/expimp/exportProcess.java | 72 ++++++++++++----- .../gs/tools/expimp/gridStoreServerIO.java | 26 ++++++ .../gs/tools/expimp/importProcess.java | 80 ++++++++++++------- .../gs/tools/expimp/messageResource.java | 10 +-- .../gs/tools/expimp/messageResource_ja.java | 8 +- 7 files changed, 182 insertions(+), 64 deletions(-) diff --git a/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/cmdAnalyze.java b/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/cmdAnalyze.java index 07be669..abe4a26 100644 --- a/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/cmdAnalyze.java +++ b/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/cmdAnalyze.java @@ -468,8 +468,9 @@ private commandLineInfo analyzeParameter_Body(CMD_NAME cmdString, CommandLine co } // BOTH EXPORT IMPORT + // V5.4 support [--all] with [--acl] or [--db] with [--acl] if (commandLine.hasOption("acl")) { - if ( targetType != TARGET_TYPE.ALL ){ + if ( targetType != TARGET_TYPE.ALL && targetType != TARGET_TYPE.DB) { 
sysoutString(messageResource.getString("MESS_COMM_ERR_CMD_46")); //Use [-acl] at the same time as the [--all] option log.warn(messageResource.getString("MESS_COMM_ERR_CMD_46")); return null; @@ -481,7 +482,7 @@ private commandLineInfo analyzeParameter_Body(CMD_NAME cmdString, CommandLine co // BOTH EXPORT IMPORT if (commandLine.hasOption("silent")) { cli.setSilentFlag(true); - optionMsg +=" --slient"; + optionMsg +=" --silent"; } // BOTH EXPORT IMPORT @@ -692,6 +693,24 @@ private commandLineInfo analyzeParameter_Body(CMD_NAME cmdString, CommandLine co cli.setSchemaCheckSkipFlag(true); optionMsg += " --schemaCheckSkip"; } + + if (commandLine.hasOption("progress")) { + boolean sts = true; + String progress = commandLine.getOptionValue("progress"); + int progress_param = 0; + try { + progress_param = Integer.parseInt(progress); + } catch ( NumberFormatException e ) { + sts = false; + } + if ( !sts || progress_param <= 0) { + sysoutString(messageResource.getString("MESS_COMM_ERR_CMD_59")+ ":[" + progress + "]"); + log.warn(messageResource.getString("MESS_COMM_ERR_CMD_59")+ ":[" + progress + "]"); + return null; + } + cli.setProgress(progress_param); + optionMsg += "--progress"; + } } @@ -1013,6 +1032,13 @@ private Options setImportOptionParameter() { OptionBuilder.withDescription("Intervals"); OptionBuilder.withLongOpt("intervals"); opt.addOption(OptionBuilder.create()); + + OptionBuilder.hasArgs(1); + OptionBuilder.withArgName("progress..."); + OptionBuilder.isRequired(false); + OptionBuilder.withDescription("Progress"); + OptionBuilder.withLongOpt("progress"); + opt.addOption(OptionBuilder.create()); opt.addOption("l", "list", false, "Display Container List in Local File"); opt.addOption("v", "verbose", false, "Verbose Mode"); diff --git a/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/commandLineInfo.java b/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/commandLineInfo.java index b96bac9..7554917 100644 --- a/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/commandLineInfo.java +++ b/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/commandLineInfo.java @@ -321,6 +321,8 @@ public class commandLineInfo /*implements commandLineInfo_Reader */{ private int m_maxJobBufferSize = 512; private String m_storeBlockSize = "64KB"; + + private int m_progress = 0; @@ -1278,6 +1280,24 @@ public String getStoreBlockSize() { public void setStoreBlockSize(String storeBlockSize) { m_storeBlockSize = storeBlockSize; } + + /** + * Get the progress row number + * + * @return The progress row number + */ + public int getProgress() { + return m_progress; + } + + /** + * Set the progress row number + * + * @param progress interval row number + */ + public void setProgress(int progress) { + m_progress = progress; + } /** * Get the authentication method diff --git a/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/exportProcess.java b/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/exportProcess.java index 67a8fe0..ad1c222 100644 --- a/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/exportProcess.java +++ b/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/exportProcess.java @@ -200,11 +200,11 @@ public boolean start() { List containerInfoList = null; - if ( nThreads > 1 ){ - containerInfoList = new ArrayList(); + if ( nThreads > 1 ){ + containerInfoList = new ArrayList(); - // Parallel processing - // Creating a thread object + // Parallel processing + // Creating a thread object ExportThread[] threadList = new ExportThread[nThreads]; for ( int i = 0; i < nThreads; i++ ){ threadList[i] = new 
ExportThread(i, comLineInfo, false); @@ -333,7 +333,6 @@ public void exportACL() throws Exception { Map userList = ExperimentalTool.getUsers(store); Map dbList = ExperimentalTool.getDatabases(store); comLineInfo.sysoutString(messageResource.getString("MESS_EXPORT_PROC_EXPORTPROC_21") + userList.size()); - comLineInfo.sysoutString(messageResource.getString("MESS_EXPORT_PROC_EXPORTPROC_22") + dbList.size()); if ( comLineInfo.getTestFlag() ){ return; } @@ -367,37 +366,43 @@ public void exportACL() throws Exception { } gen.writeEnd(); + int exportDatabaseNum = 0; // Database list, ACL list output + // V5.4 support [--all] with [--acl] or [--db] with [--acl] gen.writeStartArray("database"); for(Map.Entry e : dbList.entrySet()) { - gen.writeStartObject(); - gen.write("name", e.getKey()); - - gen.writeStartArray("acl"); - for (Map.Entry aclEntry : e.getValue().getPrivileges().entrySet()){ + if (comLineInfo.getTargetType() == TARGET_TYPE.ALL || (comLineInfo.getTargetType() == TARGET_TYPE.DB && comLineInfo.getDbNamelist().contains(e.getKey()))) { + exportDatabaseNum++; gen.writeStartObject(); - // The contents of V4.5 acl are the same for both users and roles. - gen.write("username", aclEntry.getKey()); - // V4.3 User Privilege Export / write database [] / acl [] / role values - PrivilegeInfo userPrivilegeInfo = aclEntry.getValue(); - if (userPrivilegeInfo != null) { - RoleType userRole = userPrivilegeInfo.getRole(); - if (RoleType.ALL.equals(userRole)) { - gen.write("role", "ALL"); - } else if (RoleType.READ.equals(userRole)) { - gen.write("role", "READ"); + gen.write("name", e.getKey()); + + gen.writeStartArray("acl"); + for (Map.Entry aclEntry : e.getValue().getPrivileges().entrySet()){ + gen.writeStartObject(); + // The contents of V4.5 acl are the same for both users and roles. + gen.write("username", aclEntry.getKey()); + // V4.3 User Privilege Export / write database [] / acl [] / role values + PrivilegeInfo userPrivilegeInfo = aclEntry.getValue(); + if (userPrivilegeInfo != null) { + RoleType userRole = userPrivilegeInfo.getRole(); + if (RoleType.ALL.equals(userRole)) { + gen.write("role", "ALL"); + } else if (RoleType.READ.equals(userRole)) { + gen.write("role", "READ"); + } } + gen.writeEnd(); } gen.writeEnd(); + gen.writeEnd(); } - gen.writeEnd(); - gen.writeEnd(); } gen.writeEnd(); gen.writeEnd(); gen.close(); + comLineInfo.sysoutString(messageResource.getString("MESS_EXPORT_PROC_EXPORTPROC_22") + exportDatabaseNum); // 5. 
Write file File file = new File(comLineInfo.getDirectoryFullPath(), GSConstants.FILE_GS_EXPORT_ACL_JSON); PrintWriter outACLFile = new PrintWriter(new BufferedWriter( @@ -533,6 +538,7 @@ public List export() { Connection conn = null; List contInfoList = new ArrayList(); Set setPartitionTable = new HashSet(); + HashMap mapTablesHasIntervalWorkerGroup = new HashMap<>(); //@SuppressWarnings("deprecation") //final FetchOption fetchOpt = FetchOption.SIZE; final FetchOption fetchOptPARTIAL = FetchOption.PARTIAL_EXECUTION; @@ -582,6 +588,8 @@ public List export() { // Get a list of partition table names when connecting to JDBC setPartitionTable = gridStoreServerIO.getPartitionTableNames(conn); + // V5.4 Handling data deviation + mapTablesHasIntervalWorkerGroup = gridStoreServerIO.getTablesHasIntervalWorkerGroup(conn); } } @@ -616,6 +624,15 @@ public List export() { toolContInfo.setExpirationInfo(GridDBJdbcUtils.getExpirationInfo(conn, contInfo.getName())); } + // V5.4 Handling data deviation + if (mapTablesHasIntervalWorkerGroup.containsKey(contInfo.getName())) { + Integer[] intervalWorkerGroupInfo = mapTablesHasIntervalWorkerGroup.get(contInfo.getName()); + Integer intervalWorkerGroup = intervalWorkerGroupInfo[0]; + Integer intervalWorkerGroupPos = intervalWorkerGroupInfo[1]; + toolContInfo.setIntervalWorkerGroup(intervalWorkerGroup); + toolContInfo.setIntervalWorkerGroupPos(intervalWorkerGroupPos); + } + // Search container = store.getContainer(contName); String queryString = getQueryStr(toolContInfo); @@ -780,6 +797,7 @@ public List exportTimeInterval() { Set setIntervalPartitionTable = new HashSet(); Set setTimeSeries = new HashSet(); + HashMap mapTablesHasIntervalWorkerGroup = new HashMap<>(); final FetchOption fetchOptPARTIAL = FetchOption.PARTIAL_EXECUTION; Calendar cal = Calendar.getInstance(); @@ -838,6 +856,8 @@ public List exportTimeInterval() { setIntervalPartitionTable = gridStoreServerIO.getIntervalPartitionTableNames(conn); // TimeSeriesコンテナ名の一覧を取得する setTimeSeries = gridStoreServerIO.getTimeSeriesContainerNames(conn); + // V5.4 Handling data deviation + mapTablesHasIntervalWorkerGroup = gridStoreServerIO.getTablesHasIntervalWorkerGroup(conn); } } @@ -982,6 +1002,14 @@ else if ( intervals[0].getTime() > maxBoundaryValue.getTime() && intervals[1].ge log.warn(warnMsg); } } + // V5.4 Handling data deviation + if (mapTablesHasIntervalWorkerGroup.containsKey(contInfo.getName())) { + Integer[] intervalWorkerGroupInfo = mapTablesHasIntervalWorkerGroup.get(contInfo.getName()); + Integer intervalWorkerGroup = intervalWorkerGroupInfo[0]; + Integer intervalWorkerGroupPos = intervalWorkerGroupInfo[1]; + toolContInfo.setIntervalWorkerGroup(intervalWorkerGroup); + toolContInfo.setIntervalWorkerGroupPos(intervalWorkerGroupPos); + } } // 検索 diff --git a/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/gridStoreServerIO.java b/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/gridStoreServerIO.java index a9d73a2..ec5279b 100644 --- a/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/gridStoreServerIO.java +++ b/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/gridStoreServerIO.java @@ -29,6 +29,7 @@ import com.toshiba.mwcloud.gs.Row; import com.toshiba.mwcloud.gs.tools.common.data.ToolConstants; import com.toshiba.mwcloud.gs.tools.expimp.util.Utility; +import java.util.HashMap; /** * GGridStore access class @@ -396,6 +397,31 @@ public static Set getTimeSeriesContainerNames(Connection conn) throws Ex return setContainerNames; } + + /** + * Get a list of tables that has interval worker group 
properties + * @param conn the JBDC connection + * @return list of tables in a map (tableName -> {intervalWorkerGroup, intervalWorkerGroupPos}) + * @throws Exception + */ + public static HashMap getTablesHasIntervalWorkerGroup(Connection conn) throws Exception { + HashMap tablesMap = new HashMap<>(); + try { + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery(ToolConstants.STMT_SELECT_META_TABLES_INTERVAL_WORKER_GROUP); + while (rs.next()) { + String tableName = rs.getString(ToolConstants.META_TABLES_TABLE_NAME); + Integer intervalWorkerGroup = (Integer) rs.getObject(ToolConstants.META_TABLES_INTERVAL_WORKER_GROUP); + Integer intervalWorkerGroupPos = (Integer) rs.getObject(ToolConstants.META_TABLES_INTERVAL_WORKER_GROUP_POS); + Integer[] columnsMap = {intervalWorkerGroup, intervalWorkerGroupPos}; + tablesMap.put(tableName, columnsMap); + } + } catch (Exception e ) { + throw e; + } + + return tablesMap; + } /** * Method to create container on GridStore diff --git a/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/importProcess.java b/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/importProcess.java index b9771e1..d9e9cd4 100644 --- a/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/importProcess.java +++ b/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/importProcess.java @@ -46,10 +46,8 @@ import com.toshiba.mwcloud.gs.GridStore; import com.toshiba.mwcloud.gs.IndexInfo; import com.toshiba.mwcloud.gs.Row; -import com.toshiba.mwcloud.gs.TriggerInfo; import com.toshiba.mwcloud.gs.experimental.DatabaseInfo; import com.toshiba.mwcloud.gs.experimental.ExperimentalTool; -import com.toshiba.mwcloud.gs.experimental.ExtendedContainerInfo; import com.toshiba.mwcloud.gs.experimental.PrivilegeInfo; import com.toshiba.mwcloud.gs.experimental.PrivilegeInfo.RoleType; import com.toshiba.mwcloud.gs.experimental.UserInfo; @@ -63,7 +61,6 @@ import com.toshiba.mwcloud.gs.tools.common.data.ToolConstants.RowFileType; import com.toshiba.mwcloud.gs.tools.common.data.ToolContainerInfo; import com.toshiba.mwcloud.gs.tools.expimp.GSConstants.TARGET_TYPE; -import com.toshiba.mwcloud.gs.tools.expimp.util.Report; import com.toshiba.mwcloud.gs.tools.expimp.util.Utility; /** @@ -489,40 +486,47 @@ private void importACL() throws Exception{ } comLineInfo.sysoutString(messageResource.getString("MESS_EXPORT_PROC_EXPORTPROC_21")+userInfoList.size()); + int importDatabaseNum = 0; // Database creation / ACL Map gsDbMap = ExperimentalTool.getDatabases(store); for ( Map.Entry> entry : dbInfoMap.entrySet() ){ - DatabaseInfo gsDbInfo = gsDbMap.get(entry.getKey()); - if ( gsDbInfo != null ){ - // Check if it match - for ( Map.Entry entryPri : entry.getValue().entrySet() ){ - if ( gsDbInfo.getPrivileges().get(entryPri.getKey()) == null ){ - throw new GSEIException(messageResource.getString("MESS_IMPORT_ERR_IMPORTPROC_37")+" dbName=["+entry.getKey()+"]"); + if (comLineInfo.getTargetType() == TARGET_TYPE.ALL || + (comLineInfo.getTargetType() == TARGET_TYPE.DB && + comLineInfo.getDbNamelist().contains(entry.getKey()))) { + importDatabaseNum++; + DatabaseInfo gsDbInfo = gsDbMap.get(entry.getKey()); + if ( gsDbInfo != null ){ + // Check if it match + for ( Map.Entry entryPri : entry.getValue().entrySet() ){ + if ( gsDbInfo.getPrivileges().get(entryPri.getKey()) == null ){ + throw new GSEIException(messageResource.getString("MESS_IMPORT_ERR_IMPORTPROC_37")+" dbName=["+entry.getKey()+"]"); + } } - } - } else { - DatabaseInfo dbInfo = new DatabaseInfo(entry.getKey(), entry.getValue()); - try { - 
ExperimentalTool.putDatabase(store, entry.getKey(), dbInfo, false); - } catch ( GSException e ){ - throw new GSEIException(messageResource.getString("MESS_IMPORT_ERR_IMPORTPROC_3D") - +" dbName=["+entry.getKey()+"] msg=["+e.getMessage()+"]", e); - } - if ( entry.getValue().size() > 0 ){ - for ( Map.Entry privilegeEntry : entry.getValue().entrySet() ){ - try { - ExperimentalTool.putPrivilege(store, entry.getKey(), privilegeEntry.getKey(), privilegeEntry.getValue()); - } catch ( GSException e ){ - throw new GSEIException(messageResource.getString("MESS_IMPORT_ERR_IMPORTPROC_3E") - +" dbName=["+entry.getKey()+"] user=["+entry.getKey()+"] msg=["+e.getMessage()+"]", e); + } else { + DatabaseInfo dbInfo = new DatabaseInfo(entry.getKey(), entry.getValue()); + try { + ExperimentalTool.putDatabase(store, entry.getKey(), dbInfo, false); + } catch ( GSException e ){ + throw new GSEIException(messageResource.getString("MESS_IMPORT_ERR_IMPORTPROC_3D") + +" dbName=["+entry.getKey()+"] msg=["+e.getMessage()+"]", e); + } + if ( entry.getValue().size() > 0 ){ + for ( Map.Entry privilegeEntry : entry.getValue().entrySet() ){ + try { + ExperimentalTool.putPrivilege(store, entry.getKey(), privilegeEntry.getKey(), privilegeEntry.getValue()); + } catch ( GSException e ){ + throw new GSEIException(messageResource.getString("MESS_IMPORT_ERR_IMPORTPROC_3E") + +" dbName=["+entry.getKey()+"] user=["+entry.getKey()+"] msg=["+e.getMessage()+"]", e); + } } } + log.info("create database name=["+entry.getKey()+"]"); } - log.info("create database name=["+entry.getKey()+"]"); } + } - comLineInfo.sysoutString(messageResource.getString("MESS_EXPORT_PROC_EXPORTPROC_22")+dbInfoMap.size()); + comLineInfo.sysoutString(messageResource.getString("MESS_EXPORT_PROC_EXPORTPROC_22") + importDatabaseNum); } catch ( GSEIException e ){ @@ -971,6 +975,11 @@ public int import1(List dataList, Set skipDbList) { // Partition table created via JDBC targetContainer = createPartitionTable(conn, store, cInfo, contInfo); } else { + // メタデータファイルにIntervalWorkerGroup・IntervalWorkerGroupPosが指定されるとエラーになる + if (contInfo.getIntervalWorkerGroup() != null || contInfo.getIntervalWorkerGroupPos() != null) { + throw new GSEIException(messageResource.getString("MESS_COMM_ERR_METAINFO_47") + +": db=["+contInfo.getDbName()+"] containerName=["+contInfo.getName()+"]"); + } // Regular container created with Java API targetContainer = createContainer(store, cInfo, contInfo); } @@ -1121,9 +1130,11 @@ else if (from.getTime() > intervalTo.getTime()) { containerFileList = contInfo.getContainerFileList(); } + String containerName = contInfo.getName(); if (contInfo.getContainerFileType().equals(RowFileType.CSV) || contInfo.getContainerFileType().equals(RowFileType.ARCHIVE_CSV)) { // csv形式は元々1ロウデータファイルのみだったため、複数ロウデータファイルに対応 + int progress = comLineInfo.getProgress(); for (String containerFile:containerFileList) { List containerFiles = new ArrayList(); containerFiles.add(containerFile); @@ -1153,6 +1164,11 @@ else if (from.getTime() > intervalTo.getTime()) { m_timePut += (endMultiPut - startMultiPut); rowList = new ArrayList(commitCount); } + + // Write logs after every progress count + if (progress > 0 && ((addRowCount % progress) == 0)){ + log.info(containerName + ": " + addRowCount + " rows imported."); + } } if ( rowList.size() > 0 ){ startMultiPut = System.currentTimeMillis(); @@ -1166,6 +1182,8 @@ else if (from.getTime() > intervalTo.getTime()) { // ロウファイル読み込み開始 m_fileIO.readContainer(contInfo, containerFileList); + // Get the progress row number + int progress = 
comLineInfo.getProgress(); // ROWの読み込みと登録 int commitCount = comLineInfo.getCommitCount(); List rowList = new ArrayList(commitCount); @@ -1188,6 +1206,10 @@ else if (from.getTime() > intervalTo.getTime()) { m_timePut += (endMultiPut - startMultiPut); rowList = new ArrayList(commitCount); } + // Write logs after every progress count + if (progress > 0 && ((addRowCount % progress) == 0)){ + log.info(containerName + ": " + addRowCount + " rows imported."); + } } if ( rowList.size() > 0 ){ startMultiPut = System.currentTimeMillis(); @@ -1301,7 +1323,7 @@ private Container createContainer(GridStore store, ContainerInfo cInfo, * @throws GSEIException */ private Container createPartitionTable(Connection conn, GridStore store, - ContainerInfo cInfo, ToolContainerInfo metaInfo) throws GSEIException { + ContainerInfo cInfo, ToolContainerInfo metaInfo) throws GSEIException, GridStoreCommandException { if ( cInfo == null ) { // Create a new table because it does not exist @@ -1368,7 +1390,7 @@ private Container createPartitionTable(Connection conn, GridStore store, * @param metaInfo * @throws GSEIException */ - private void createPartitionTableFromMetaInfo(Connection conn, ToolContainerInfo metaInfo) throws GSEIException { + private void createPartitionTableFromMetaInfo(Connection conn, ToolContainerInfo metaInfo) throws GSEIException, GridStoreCommandException { String sql = metaInfo.buildCreateTableStatement(); Statement stmt = null; try { diff --git a/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/messageResource.java b/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/messageResource.java index 61277f2..5f3d8dd 100644 --- a/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/messageResource.java +++ b/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/messageResource.java @@ -20,7 +20,7 @@ */ public class messageResource extends ListResourceBundle { - public static final String VERSION_NUM = "5.3.00"; + public static final String VERSION_NUM = "5.5.00"; public static final String VERSION = "V"+VERSION_NUM; public static final String GS_EXPORT_CE = "gs_export-ce"; public static final String GS_IMPORT_CE = "gs_import-ce"; @@ -93,7 +93,6 @@ public class messageResource extends ListResourceBundle { { "MESS_IMPORT_ERR_IMPORTPROC_31", "D00231: The specified file by -f option is invalid." }, { "MESS_IMPORT_ERR_IMPORTPROC_32", "D00232: There is no target container in the file." }, { "MESS_IMPORT_ERR_IMPORTPROC_33", "D00233: An unexpected error occurred while importing row data." }, - { "MESS_IMPORT_ERR_IMPORTPROC_34", "D00234: An unexpected error occurred while creating trigger." }, { "MESS_IMPORT_ERR_IMPORTPROC_35", "D00235: An ACL file does not exist." }, { "MESS_IMPORT_ERR_IMPORTPROC_36", "D00236: The same user that the user information is different has already existed." }, { "MESS_IMPORT_ERR_IMPORTPROC_37", "D00237: The same database that the database setting is different has already existed." }, @@ -303,9 +302,7 @@ public class messageResource extends ListResourceBundle { { "MESS_COMM_ERR_METAINFO_5", "D00905: An unexpected error occurred while converting container information to json." }, { "MESS_COMM_ERR_METAINFO_6", "D00906: An unexpected error occurred while converting container information to json." }, { "MESS_COMM_ERR_METAINFO_7", "D00907: An unexpected error occurred while checking UTF-8 BOM." },// - { "MESS_COMM_ERR_METAINFO_8", "D00908: An unexpected error occurred while parsing HI properties." 
},// { "MESS_COMM_ERR_METAINFO_9", "D00909: An unexpected error occurred while parsing timeseries properties." },// - { "MESS_COMM_ERR_METAINFO_10", "D00910: An unexpected error occurred while parsing trigger properties." },// { "MESS_COMM_ERR_METAINFO_11", "D00911: An unexpected error occurred while parsing index properties." },// { "MESS_COMM_ERR_METAINFO_12", "D00912: An unexpected error occurred while parsing column properties." },// { "MESS_COMM_ERR_METAINFO_13", "D00913: An unexpected error occurred while parsing column type." },// @@ -319,7 +316,6 @@ public class messageResource extends ListResourceBundle { { "MESS_COMM_ERR_METAINFO_20", "D00920: The column name of the index information is not set." },// { "MESS_COMM_ERR_METAINFO_21", "D00921: The index type of the index information is not set." },// { "MESS_COMM_ERR_METAINFO_22", "D00922: The column name of the index information does not exist in the column information." },// - { "MESS_COMM_ERR_METAINFO_23", "D00923: The column name of the trigger information does not exist in the column information." },// { "MESS_COMM_ERR_METAINFO_24", "D00924: The column name of HI information does not exist in the column information." },// { "MESS_COMM_ERR_METAINFO_25", "D00925: An unexpected error occurred while checking container information." },// @@ -345,6 +341,7 @@ public class messageResource extends ListResourceBundle { { "MESS_COMM_ERR_METAINFO_44", "D00944: The file specified by \"--filterfile\" is invalid." }, { "MESS_COMM_ERR_METAINFO_45", "D00945: An unexpected error occurred while reading the file specified by \"--filterfile\". " }, { "MESS_COMM_ERR_METAINFO_46", "D00946: The partitioned table exported in format before V4 will be skipped." }, + { "MESS_COMM_ERR_METAINFO_47", "D00947: Interval partition table must be set when (interval_worker_group or interval_worker_group_position) are specified" }, { "MESS_COMM_PROC_CMD_1", "A command duplicate check start." }, { "MESS_COMM_PROC_CMD_2", "A command line parameter persing start." }, @@ -400,7 +397,7 @@ public class messageResource extends ListResourceBundle { { "MESS_COMM_ERR_CMD_42", "D00A42: [--prefixdb] option and [--container] option must be set at same time." },// { "MESS_COMM_ERR_CMD_44", "D00A44: [--list] option is not available at the same time as [--all] option, [--db] option, or [--container] option." },// { "MESS_COMM_ERR_CMD_45", "D00A45: [--parallel] option is invalid. Set a integer value between 2-16." },// - { "MESS_COMM_ERR_CMD_46", "D00A46: [--acl] option and [--all] option must be set at same time." },// + { "MESS_COMM_ERR_CMD_46", "D00A46: [--acl] option and [--all] option or [--db] option must be set at same time." },// { "MESS_COMM_ERR_CMD_47", "D00A47: [--parallel] option is available at same time as [--binary] option and [--out] option." },// { "MESS_COMM_ERR_CMD_48", "D00A48: Property [load.input.threadNum] is invalid. Set a integer value between 1-128." },// { "MESS_COMM_ERR_CMD_49", "D00A49: Property [load.output.threadNum] is invalid. Set a integer value between 1-16." },// @@ -411,6 +408,7 @@ public class messageResource extends ListResourceBundle { { "MESS_COMM_ERR_CMD_56", "D00A56: [--intervals] option is invalid. Specify a value yyyyMMdd(from):yyyyMMdd(to) format and specify so that from < to." },// { "MESS_COMM_ERR_CMD_57", "D00A57: Property [intervalTimeZone] is invalid. Specify TimeZone or GMT+HH:mm format." },// { "MESS_COMM_ERR_CMD_58", "D00A58: [--intervals] option and [--filterfile] option cannot be set at same time." 
},// + { "MESS_COMM_ERR_CMD_59", "D00A59: [--progress] option is invalid. Please set positive integer number." },// { "MESS_COMM_PROC_PROCINFO_1", "The container name which did not be processed has been detected." },// { "MESS_COMM_PROC_PROCINFO_2", "(%d/%d)Container %s is imported %s.(%s)" },// diff --git a/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/messageResource_ja.java b/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/messageResource_ja.java index 53a201b..f579ba8 100644 --- a/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/messageResource_ja.java +++ b/expimp-ce/src/com/toshiba/mwcloud/gs/tools/expimp/messageResource_ja.java @@ -92,7 +92,6 @@ public class messageResource_ja extends ListResourceBundle { { "MESS_IMPORT_ERR_IMPORTPROC_31", "D00231: -fオプションで指定されたファイルが不正です" }, { "MESS_IMPORT_ERR_IMPORTPROC_32", "D00232: 対象コンテナがファイルに存在しません" }, { "MESS_IMPORT_ERR_IMPORTPROC_33", "D00233: ロウデータのインポート処理でエラーが発生しました" }, - { "MESS_IMPORT_ERR_IMPORTPROC_34", "D00234: トリガーの作成処理でエラーが発生しました" }, { "MESS_IMPORT_ERR_IMPORTPROC_35", "D00235: ACLファイルが存在していません。" }, { "MESS_IMPORT_ERR_IMPORTPROC_36", "D00236: 同じユーザ名で異なる設定のユーザが存在します" }, { "MESS_IMPORT_ERR_IMPORTPROC_37", "D00237: 同じデータベース名で異なる設定のデータベースが存在します" }, @@ -302,9 +301,7 @@ public class messageResource_ja extends ListResourceBundle { { "MESS_COMM_ERR_METAINFO_5", "D00905: コンテナ情報のJSON化処理で変換エラーが発生しました" }, { "MESS_COMM_ERR_METAINFO_6", "D00906: コンテナ情報のJSON化処理でエラーが発生しました" }, { "MESS_COMM_ERR_METAINFO_7", "D00907: UTF-8のBOM処理でエラーが発生しました" },// - { "MESS_COMM_ERR_METAINFO_8", "D00908: 誤差あり間引き圧縮情報の解析処理でエラーが発生しました" },// { "MESS_COMM_ERR_METAINFO_9", "D00909: 時系列プロパティ情報の解析処理でエラーが発生しました" },// - { "MESS_COMM_ERR_METAINFO_10", "D00910: トリガー情報の解析処理でエラーが発生しました" },// { "MESS_COMM_ERR_METAINFO_11", "D00911: 索引情報の解析処理でエラーが発生しました" },// { "MESS_COMM_ERR_METAINFO_12", "D00912: カラム情報の解析処理でエラーが発生しました" },// { "MESS_COMM_ERR_METAINFO_13", "D00913: カラム種別の解析処理でエラーが発生しました" },// @@ -318,7 +315,6 @@ public class messageResource_ja extends ListResourceBundle { { "MESS_COMM_ERR_METAINFO_20", "D00920: 索引情報のカラム名が設定されていません" },// { "MESS_COMM_ERR_METAINFO_21", "D00921: 索引情報の索引種別が設定されていません" },// { "MESS_COMM_ERR_METAINFO_22", "D00922: 索引情報のカラム名はカラム情報に登録されていません" },// - { "MESS_COMM_ERR_METAINFO_23", "D00923: トリガー情報のカラム名はカラム情報に登録されていません" },// { "MESS_COMM_ERR_METAINFO_24", "D00924: 誤差あり間引き圧縮情報のカラム名はカラム情報に登録されていません" },// { "MESS_COMM_ERR_METAINFO_25", "D00925: コンテナ情報の検査処理でエラーが発生しました" },// @@ -344,6 +340,7 @@ public class messageResource_ja extends ListResourceBundle { { "MESS_COMM_ERR_METAINFO_44", "D00944: --filterfileで指定されたファイルの定義に誤りがあります。" }, { "MESS_COMM_ERR_METAINFO_45", "D00945: --filterfileで指定されたファイルの読み込みでエラーが発生しました。" }, { "MESS_COMM_ERR_METAINFO_46", "D00946: V4より前のエクスポート形式で出力されたパーティショニングコンテナはスキップします。" }, + { "MESS_COMM_ERR_METAINFO_47", "D00947: (interval_worker_groupまたはinterval_worker_group_position)を指定する場合はインターバルパーティショニングテーブルが必須です。" }, { "MESS_COMM_PROC_CMD_1", "コマンド重複チェック処理を開始します" }, { "MESS_COMM_PROC_CMD_2", "コマンドラインパラメタ解析処理を開始します" }, @@ -399,7 +396,7 @@ public class messageResource_ja extends ListResourceBundle { { "MESS_COMM_ERR_CMD_42", "D00A42: [--prefixdb]は、[--container]または[--containerregex]オプションと同時に使用してください" },// { "MESS_COMM_ERR_CMD_44", "D00A44: [--list]と[--all][--db][--container][--containerregex]は同時に設定できません" },// { "MESS_COMM_ERR_CMD_45", "D00A45: [--parallel]の値が不正です (2以上16以下の整数を指定してください)" },// - { "MESS_COMM_ERR_CMD_46", "D00A46: [--acl]は、[--all]オプションと同時に使用してください" },// + { "MESS_COMM_ERR_CMD_46", "D00A46: [--acl]は、[--all]オプション、または[--db]オプションと同時に使用してください" 
},// { "MESS_COMM_ERR_CMD_47", "D00A47: [--parallel]は、[--binary][--out]と同時に使用してください" },// { "MESS_COMM_ERR_CMD_49", "D00A49: プロパティ[load.output.threadNum]の値が不正です (1以上16以下の整数を指定してください)" },// { "MESS_COMM_ERR_CMD_48", "D00A48: プロパティ[load.input.threadNum]の値が不正です (1以上128以下の整数を指定してください)" },// @@ -410,6 +407,7 @@ public class messageResource_ja extends ListResourceBundle { { "MESS_COMM_ERR_CMD_56", "D00A56: [--intervals]の値が不正です(yyyyMMdd(始点):yyyyMMdd(終点)形式で指定して、始点 < 終点となるように指定してください)" },// { "MESS_COMM_ERR_CMD_57", "D00A57: プロパティ[intervalTimeZone]の値が不正です (タイムゾーン名またはGMT+HH:mm形式で指定してください)" },// { "MESS_COMM_ERR_CMD_58", "D00A58: [--intervals]と[--filterfile]は同時に設定できません" },// + { "MESS_COMM_ERR_CMD_59", "D00A59: [--progress]の値が不正です。正の整数を指定してください" },// { "MESS_COMM_PROC_PROCINFO_1", "処理されないコンテナ名が検出されました" },// { "MESS_COMM_PROC_PROCINFO_2", "(%d/%d)コンテナ %s のインポートに%sしました。(%s)" },// From d062f7b172badd828cfcb0bd79cb66e933aad2a4 Mon Sep 17 00:00:00 2001 From: knonomura Date: Fri, 9 Feb 2024 19:13:11 +0900 Subject: [PATCH 3/5] Change version --- common/build.gradle | 2 +- expimp-ce/build.gradle | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/common/build.gradle b/common/build.gradle index e60a675..3c3cdc8 100644 --- a/common/build.gradle +++ b/common/build.gradle @@ -13,7 +13,7 @@ sourceSets { } } -def gridstoreVersion = '5.3.0' +def gridstoreVersion = '5.5.0' dependencies { implementation 'commons-io:commons-io:2.4' diff --git a/expimp-ce/build.gradle b/expimp-ce/build.gradle index 6315c90..55d8f90 100644 --- a/expimp-ce/build.gradle +++ b/expimp-ce/build.gradle @@ -16,8 +16,8 @@ sourceSets { } } -def gridstoreVersion = '5.3.0' -def gridstoreJdbcVersion = '5.3.0' +def gridstoreVersion = '5.5.0' +def gridstoreJdbcVersion = '5.5.0' dependencies { implementation project (":griddb-tools-common") From b2455d2ca9f6d4d6fe35ee76022faa53abe2b70f Mon Sep 17 00:00:00 2001 From: knonomura Date: Fri, 9 Feb 2024 19:14:01 +0900 Subject: [PATCH 4/5] Update for V5.5 --- README.md | 8 +- Specification_en.md | 184 ++++++++++++++++++++++++++++++++++++++------ Specification_ja.md | 130 +++++++++++++++++++++++++++++-- 3 files changed, 288 insertions(+), 34 deletions(-) diff --git a/README.md b/README.md index 4083e37..fdb71f7 100644 --- a/README.md +++ b/README.md @@ -9,10 +9,10 @@ The GridDB export/import tools, to recover a database from local damages or the Building and program execution are checked in the environment below. OS: CentOS 7.9 (x64) - Java: Java™ SE Development Kit 8 - GridDB Server: V4.6.1/V5.3.0 CE (Community Edition) - GridDB Java Client: V4.6.1/V5.3.0 CE (Community Edition) - GridDB JDBC: V4.6.0/V5.3.0 CE (Community Edition) + Java: OpenJDK 1.8.0 + GridDB Server: V4.6.1/V5.5.0 CE (Community Edition) + GridDB Java Client: V4.6.1/V5.5.0 CE (Community Edition) + GridDB JDBC: V4.6.0/V5.5.0 CE (Community Edition) ## Quick start - Build and Run diff --git a/Specification_en.md b/Specification_en.md index a6c0cbf..8d8f0c2 100644 --- a/Specification_en.md +++ b/Specification_en.md @@ -246,21 +246,79 @@ $ gs_export --all -u admin/admin -d ./20210131/ --intervals 20210101:20210131 - The --all option specifies all containers, of which only date accumulation containers are used for output. - As multiple row data files are output, it is recommended to specify the destination directory using the -d option together with the --all option. -### How to specify user access rights +#### How to specify user access rights -Information on GridDB cluster users and their access rights can also be exported. 
Use the following command when migrating all data in the cluster. +Information on GridDB cluster users and their access rights can also be +exported. Use the following command when migrating all data in the +cluster. -- Specify the --all option and --acl option. However, only user information of a general user can be exported. Migrate the data on the administrator user separately (copy the user definition file). +- The [--acl] option must be used with the [--all] option or the [--db] option . However, only user information of a general user can be exported. Migrate the data on the administrator user separately (copy the user definition file). -[Example] +1. Specify the `--all` option and `--acl` option. -``` example -$ gs_export --all -u admin/admin --acl -``` + \[Example\] -[Memo] -- The command needs to be executed by an administrator user. + ```example + $ gs_export --all -u admin/admin --acl + ``` + +2. Specify the `--db` option and `--acl` option. + + \[Example\] + + ``` + $ gs_export --db testdb1 testdb2 -u admin/admin --acl + ``` + +\[Memo\] + +- The command needs to be executed by an administrator user. + +- For v4.5 metadata file `gs_export_acl.json` add and change some out parameters: + | Tag Name | Value | Note | + | ---------------------------- | ---------------------- | ------------------------------------------------------------------- | + | /user[n]/isRole | true or false | whether it is a role.
true: is a role
false: not a role | + | /user[n]/username | user name or role name | Indicates the name of the user or the name of the role. | + | /database[n]/acl[n]/username | user name or role name | Indicates the name of the user or the name of the role. | + + Example of `gs_export_acl.json` v4.5 + + ``` + { + "version":"4.5.0", + "user": + [ + { + "isRole":false, + "username":"user1", + "password":"..." + }, + { + "isRole":true, + "username":"role01", + } + ], + "database": + [ + { + "name":"db1", + "acl": + [ + { + "username":"user1", + "role":"READ" + }, + { + "username":"role01", + "role":"READ" + } + ] + } + ] + } + + ``` #### How to specify a view @@ -505,17 +563,28 @@ There are 3 ways to specify a container, by specifying all the containers in the If data is exported by specifying the --acl option in the export function, data on the user and access rights can also be imported. Use the following command when migrating all data in the cluster. -- Specify the --all option and --acl option. +1. Specify the `--all` option and `--acl` option. -[Example] + \[Example\] -``` example -$ gs_import --all --acl -u admin/admin -``` + ```example + $ gs_import --all --acl -u admin/admin + ``` + +2. Specify the `--db` option and `--acl` option. + + \[Example\] + + ``` + $ gs_import --db testdb1 testdb2 --acl -u admin/admin + ``` + +\[Memo\] -[Memo] - The command needs to be executed by an administrator user. -- Use the following command when migrating all data in the cluster. Execute the command without any databases and general users existing in the migration destination. +- Use the following command when migrating all data in the cluster. + Execute the command without any databases and general users existing + in the migration destination. ##### How to specify a view @@ -661,6 +730,45 @@ $ gs_import --all -u admin/admin -d /data/expdata --force **Detailed settings in the operating display** - Processing details can be displayed by specifying the --verbose option. +**Progress output log when importing container** + +- Executed row numbers can be displayed in the log file by specifying the `--process ` option. +- Interval row number must be positive integer number. + +\[Example\] + +``` +$ gs_import --all -u admin/admin --process 10 +Import Start. +Number of target containers : 100 + +test.dummy : 1 + +Number of target containers:1 ( Success:1 Failure:0 ) +Import Completed. +``` + +Log output in `gs_expimp-YYYYMMDD.log`: + +``` +2023-04-26T17:49:29.413Z INFO [main] com.toshiba.mwcloud.gs.tools.expimp.importMain [importMain.java::45] Import Start. :Version 5.3.00 +2023-04-26T17:49:29.482Z INFO [main] com.toshiba.mwcloud.gs.tools.expimp.propertiesInfo [propertiesInfo.java::361] Property : notificationMember=[10.0.0.124:10001] jdbcNotificationMember=[10.0.0.124:20001] clusterName=[MyCluster] commitCount=[1000] transactionTimeout=[2147483647] failoverTimeout=[10] jdbcLoginTimeout=[10] +2023-04-26T17:49:29.483Z INFO [main] com.toshiba.mwcloud.gs.tools.expimp.cmdAnalyze [cmdAnalyze.java::737] Parameter : --all --user=[admin] --directory=[csvtest2] --progress +2023-04-26T17:49:29.565Z INFO [main] com.toshiba.mwcloud.gs.tools.expimp.importProcess [importProcess.java::208] Get Container Information from metaInfoFile. : containerCount=[1] time=[72] +2023-04-26T17:49:30.209Z INFO [main] com.toshiba.mwcloud.gs.tools.expimp.importProcess [importProcess.java::1151] dummy: 10 rows imported. 
+2023-04-26T17:49:30.222Z INFO [main] com.toshiba.mwcloud.gs.tools.expimp.importProcess [importProcess.java::1151] dummy: 20 rows imported. +2023-04-26T17:49:30.234Z INFO [main] com.toshiba.mwcloud.gs.tools.expimp.importProcess [importProcess.java::1151] dummy: 30 rows imported. +2023-04-26T17:49:30.246Z INFO [main] com.toshiba.mwcloud.gs.tools.expimp.importProcess [importProcess.java::1151] dummy: 40 rows imported. +2023-04-26T17:49:30.260Z INFO [main] com.toshiba.mwcloud.gs.tools.expimp.importProcess [importProcess.java::1151] dummy: 50 rows imported. +2023-04-26T17:49:30.275Z INFO [main] com.toshiba.mwcloud.gs.tools.expimp.importProcess [importProcess.java::1151] dummy: 60 rows imported. +2023-04-26T17:49:30.290Z INFO [main] com.toshiba.mwcloud.gs.tools.expimp.importProcess [importProcess.java::1151] dummy: 70 rows imported. +2023-04-26T17:49:30.303Z INFO [main] com.toshiba.mwcloud.gs.tools.expimp.importProcess [importProcess.java::1151] dummy: 80 rows imported. +2023-04-26T17:49:30.318Z INFO [main] com.toshiba.mwcloud.gs.tools.expimp.importProcess [importProcess.java::1151] dummy: 90 rows imported. +2023-04-26T17:49:30.327Z INFO [main] com.toshiba.mwcloud.gs.tools.expimp.importProcess [importProcess.java::1151] dummy: 100 rows imported. +2023-04-26T17:49:30.358Z INFO [main] com.toshiba.mwcloud.gs.tools.expimp.importProcess [importProcess.java::983] import: db,test,name,dummy,rowCount,100,Time all,508,put,30,readRow,26,readMeta,4,createDrop,30,search,29,other,389 +2023-04-26T17:49:30.363Z INFO [main] com.toshiba.mwcloud.gs.tools.expimp.commandProgressStatus [commandProgressStatus.java::112] Number of target containers:1 ( Success:1 Failure:0 ) +2023-04-26T17:49:30.364Z INFO [main] com.toshiba.mwcloud.gs.tools.expimp.importMain [importMain.java::91] Import Completed.: time=[962] +``` ## Command/option specifications @@ -689,7 +797,7 @@ $ gs_import --all -u admin/admin -d /data/expdata --force | --filterfile \ | | Specify the definition file in which the search query used to export rows is described. All rows are exported by default. | | --intervals YYYYMMdd:YYYYMMdd | | If a container to be exported from is a date accumulation container, specify the date range for which rows are retrieved for export in "YYYYMMdd:YYYYMMdd" format, consisting of a start date and an end date separated by a colon. If the date range is not specified, all rows will be exported. The "intervals" option cannot be combined with the "filterfile" option. | | --parallel \ | | Execute in parallel for the specified number. When executed in parallel, the export data will be divided by the same number as the number of parallel executions. This can be specified only for the multi-container format (when the --out option is specified). A range from 2 to 32 can be specified. | - | --acl | | Data on the database, user, access rights will also be exported. This can be specified only if the user is an administrator user and --all option is specified. | + | --acl | | Data on the database, user, access rights will also be exported. This can be specified only if the user is an administrator user and the --all option or the --db option is specified. | | --prefixdb \ | | If a --container option is specified, specify the database name of the container. The containers in the default database will be processed if they are omitted. | | --force | | Processing is forced to continue even if an error occurs. Error descriptions are displayed in a list after processing ends. | | -t\|--test | | Execute the tool in the test mode. 
| @@ -712,7 +820,7 @@ $ gs_import --all -u admin/admin -d /data/expdata --force | Command | Option/argument | |---------------------|--------------------------------------| - | gs_import | -u|--user \/\
--all | --db \ \[\\] \| ( --container \ \[\\] ... \| --containerregex \ \[\\] ...)
--db \ \[\\]
\[--append|--replace\]
\[-d|--directory \\]
\[-f|--file \ \[\ ...\]\]
\[--count \\]
\[--acl\]
\[--prefixdb \\]
\[--force\]
\[--schemaCheckSkip\]
\[-v|--verbose\]
\[--silent\] | + | gs_import | -u|--user \<user name\>/\<password\>
--all | --db \<database name\> \[\<database name\>\] \| ( --container \<container name\> \[\<container name\>\] ... \| --containerregex \<regular expression\> \[\<regular expression\>\] ...)
--db \<database name\> \[\<database name\>\]
\[--append|--replace\]
\[-d|--directory \<directory path\>\]
\[-f|--file \<file name\> \[\<file name\> ...\]\]
\[--count \<commit count\>\]
\[--acl\]
\[--prefixdb \<database name\>\]
\[--force\]
\[--schemaCheckSkip\]
\[-v|--verbose\]
\[--silent\]
\[--progress \<number of rows\>\] | | gs_import | -l|--list
\[-d|--directory \<directory path\>\]
\[-f|--file \<file name\> \[\<file name\> ...\]\] |
| gs_import | --version |
| gs_import | \[-h|--help\] |
@@ -732,11 +840,12 @@ $ gs_import --all -u admin/admin -d /data/expdata --force
| \-f\|--file \<file name\> [\<file name\> ...] | | Specify the container data file to be imported. Multiple specifications allowed. All container data files of the current directory or directory specified in -d (--directory) will be applicable by default. |
| --intervals YYYYMMdd:YYYYMMdd | | If a container to be imported from is a date accumulation container, specify the date range for which row data files are retrieved for import in "YYYYMMdd:YYYYMMdd" format, consisting of a start date and an end date separated by a colon. If the date range is not specified, all row data files will be imported. |
| \--count \<commit count\> | | Specify the number of input cases until the input data is committed together. |
- | \--acl | | Data on the database, user, access rights will also be imported. This can be specified only if the user is an administrator user and the --all option is specified for data exported by specifying the --acl option. |
+ | \--acl | | Data on the database, user, access rights will also be imported. This can be specified only if the user is an administrator user and the --all option or the --db option is specified for data exported by specifying the --acl option. |
| \--prefixdb \<database name\> | | If a --container option is specified, specify the database name of the container. The containers in the default database will be processed if they are omitted. |
| \--force | | Processing is forced to continue even if an error occurs. Error descriptions are displayed in a list after processing ends. |
| \--schemaCheckSkip | | When the --append option is specified, a schema check of the existing container will not be executed. |
| \-v\|--verbose | | Output the operating display details. |
+ | \--progress \<number of rows\> | | Specify the interval, as a number of rows, at which the number of imported rows is written to the log file. |
| \--silent | | Operating display is not output. |
| \-l\|--list | | Display a list of the specified containers to be imported. |
| \--version | | Display the version of the tool. |
@@ -781,15 +890,15 @@ The tag and data items of the metadata in the JSON format are shown below. Tags
| dataAffinity | Data affinity name | Specify the data affinity name. | Arbitrary |
| partitionNo | Partition | Null string indicates no specification. | Arbitrary, output during export. Not used even if it is specified when importing. |
| columnSet | Column data set (, schema data) | Column data needs to match when adding data to an existing container | Required |
-| columnName | Column name | | Required |
-| type | JSON Data type | Specify either of the following values: BOOLEAN/ STRING/ BYTE/ SHORT/ INTEGER/ LONG/ FLOAT/ DOUBLE/ TIMESTAMP/ GEOMETRY/ BLOB/ BOOLEAN\[\]/ STRING\[\]/ BYTE\[\] /SHORT. \[\]/ INTEGER\[\]/ LONG\[\]/ FLOAT\[\]/ DOUBLE\[\]/ TIMESTAMP\[\]. | Required |
-| notNull | NOT NULL constraint | true/false | Arbitrary, "false" by default |
+|  columnName | Column name | | Required |
+|  type | JSON Data type | Specify one of the following values: BOOLEAN/ STRING/ BYTE/ SHORT/ INTEGER/ LONG/ FLOAT/ DOUBLE/ TIMESTAMP/ GEOMETRY/ BLOB/ BOOLEAN\[\]/ STRING\[\]/ BYTE\[\]/ SHORT\[\]/ INTEGER\[\]/ LONG\[\]/ FLOAT\[\]/ DOUBLE\[\]/ TIMESTAMP\[\]. | Required |
+|  notNull | NOT NULL constraint | true/false | Arbitrary, "false" by default |
| rowKeyAssigned | Row key setting (\*1) | specify either true/false
Specifying also rowKeySet causes an error | Arbitrary, "false" by default | | rowKeySet | Row key column names | Specify row key column names in array format.
The row key needs to match when adding data to an existing container | Arbitrary (\*2) |
| indexSet | Index data set | Can be set for each column. Non-existent column name will be ignored or an error will be output. | Arbitrary |
-| columnNames | Column names | Specify column names in array format. | Arbitrary (essential when indexSet is specified) |
-| type | Index type | Specify one of the following values: TREE (STRING/ BOOLEAN/ BYTE/ SHORT/ INTEGER/ LONG/ FLOAT/ DOUBLE/ TIMESTAMP) or SPATIAL (GEOMETRY). | Arbitrary (essential when indexSet is specified) |
-| indexName | Index name | Index name | Arbitrary, not specified either by default or when null is specified. |
+|  columnNames | Column names | Specify column names in array format. | Arbitrary (essential when indexSet is specified) |
+|  type | Index type | Specify one of the following values: TREE (STRING/ BOOLEAN/ BYTE/ SHORT/ INTEGER/ LONG/ FLOAT/ DOUBLE/ TIMESTAMP) or SPATIAL (GEOMETRY). | Arbitrary (essential when indexSet is specified) |
+|  indexName | Index name | Index name | Arbitrary, not specified either by default or when null is specified. |
| Table partitioning data | | | |
| tablePartitionInfo | Table partitioning data | For Interval-Hash partitioning, specify the following group of items for both Interval and Hash as an array in that order | Arbitrary |
| type | Table partitioning type | Specify either HASH or INTERVAL | Essential if tablePartitionInfo is specified |
@@ -805,6 +914,9 @@ The tag and data items of the metadata in the JSON format are shown below. Tags
| timeIntervalInfo | Time interval information | For a date accumulation container, describe the following information in array format. | Arbitrary |
| containerFile | Container data file name | File name | Required if timeIntervalInfo is specified |
| boundaryValue | Date range | date to start container data | Required if timeIntervalInfo is specified |
+| intervalWorkerGroup | Interval worker group | Specify the interval worker group to which the container is assigned | Essential if containerType is TIME_SERIES or COLLECTION, the tablePartitionInfo type is INTERVAL, and the partitioning key is TIMESTAMP |
+| intervalWorkerGroupPosition | Position of interval worker group | Specify the position within the interval worker group | Essential if containerType is TIME_SERIES or COLLECTION, the tablePartitionInfo type is INTERVAL, and the partitioning key is TIMESTAMP |
+
- \* 1: Information output to metadata file before V4.2. Use rowKeySet in V4.3 or later.
- \* 2: Required when containerType is TIME_SERIES and rowKeyAssigned is false.
@@ -1179,3 +1291,25 @@ For import purposes, any file name can be used for the external object file. Lis
1,10,15,20,40,70,71,72,73,74
```
+
+## Avoid data deviation
+
+Data deviation (uneven placement of data across nodes) can occur when a large amount of data is registered into containers.
+To avoid this, the following parameters are added to the metadata file when exporting/importing containers that use interval partitioning (time series or collection) and whose partitioning key is of TIMESTAMP type:
+
+| Item | Description |
+| --------------------------- | --------------------------------------------------------------------------------------------------- |
+| intervalWorkerGroup | The interval worker group to which the container is assigned; corresponds to PARTITION_INTERVAL_WORKER_GROUP |
+| intervalWorkerGroupPosition | The position within the interval worker group; corresponds to PARTITION_INTERVAL_WORKER_GROUP_POSITION |
+
+- Export:
+  The system gets all the container information from the #tables meta table.
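  As an illustration, when these properties are set for a container, the exported metadata file contains a fragment of the following form (the container name and the values are examples; all other keys of the metadata file are omitted here):

  ``` example
  {
    "container":"c001",
    "intervalWorkerGroup":1,
    "intervalWorkerGroupPosition":1
  }
  ```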
If the data contains PARTITION_INTERVAL_WORKER_GROUP, PARTITION_INTERVAL_WORKER_GROUP_POSITION it will set their value for the corresponding parameters in the metadata file (.\_properties.json). + +- Import: + System will read from metadatafile (.\_properties.json) + - If intervalWorkerGroup's values exists: Adds `WITH (PARTITION_INTERVAL_WORKER_GROUP = )` to `CREATE TABLE` statement. + - If both intervalWorkerGroup and intervalWorkerGroupPosition's values exists: Adds `WITH (PARTITION_INTERVAL_WORKER_GROUP = AND PARTITION_INTERVAL_WORKER_GROUP_POSITION = )` to `CREATE TABLE` statement + - If intervalWorkerGroupPosition's value is null or intervalWorkerGroup's value is null: + - System will catch the exception returned from server. + - If tablePartitionInfo does not exist but intervalWorkerGroupPosition's value or intervalWorkerGroup's value is not null: + - System will throw exception with error message: Interval partition table must be set when (interval_worker_group or interval_worker_group_position) are specified diff --git a/Specification_ja.md b/Specification_ja.md index 7dbe281..6143867 100644 --- a/Specification_ja.md +++ b/Specification_ja.md @@ -226,6 +226,7 @@ cont_year2014 :select * where timestamp > TIMESTAMP('2014-05-21T08:00:00.000Z' - コンテナが複数の定義に該当する場合、最初に記述された定義が適用されます。 - ファイルはUTF-8で記述してください。 - エクスポートのテスト機能を実行すると、定義ファイルの記述が正しいかを確認することができます。 +- ORDER BYは使用できません。 ◆タイムインターバルによる指定 @@ -251,7 +252,7 @@ $ gs_export --all -u admin/admin -d ./20210131/ --intervals 20210101:20210131 GridDBクラスタのユーザやアクセス権の情報もエクスポートすることができます。 クラスタ上のすべてのデータを移行する場合にご利用ください。 -- --allオプションおよび--aclオプションを指定。ただし、エクスポートできるユーザ情報は、一般ユーザのみです。管理ユーザの情報は別途移行(ユーザ定義ファイルをコピー)してください。 +- --aclオプションと--allオプションまたは--dbオプションを指定。ただし、エクスポートできるユーザ情報は、一般ユーザのみです。管理ユーザの情報は別途移行(ユーザ定義ファイルをコピー)してください。 【例】 @@ -259,6 +260,10 @@ GridDBクラスタのユーザやアクセス権の情報もエクスポート $ gs_export --all -u admin/admin --acl ``` +``` example +$ gs_export --db db001 -u admin/admin --acl +``` + 【メモ】 - 管理ユーザで実行する必要があります。 @@ -399,6 +404,31 @@ $ gs_export --all -u admin/admin --force 【メモ】 - エラーによって処理をスキップしたコンテナについても、不完全ですがコンテナデータファイルに情報を出力します。ただし、エクスポート実行ファイルには記録していないため、インポート処理されることはありません。ロウデータ取得エラー解決後、当該コンテナのエクスポート処理を再実行してください。 +- --forceオプションを指定していない場合、エラーが発生した時点で、エクスポート処理を終了します。 + +【例】 + +``` example +$ gs_export --all -u admin/admin + +エクスポートを開始します +出力ディレクトリ : /var/lib/gridstore/export +対象コンテナ数 : 6 + +Name PartitionId Row +------------------------------------------------------------------ +public.container_2 15 10 +public.container_3 25 20 +[エラーが発生] + +対象コンテナ数 : 6 ( 成功:2 失敗:1 未処理:3 ) + +エクスポートを終了しました +``` + +【注意】 +- --parallelオプションの指定時にエラーが発生しても、エクスポート処理がすぐに終了しない場合があります。 + ### その他の機能 **動作表示の詳細指定** @@ -506,7 +536,7 @@ $ gs_export -c c002 c001 -u admin/admin --silent エクスポート機能で--aclオプションを指定してエクスポートしたデータの場合、ユーザやアクセス権の情報もインポートすることができます。 クラスタ上のすべてのデータを移行する場合にご利用ください。 -- --allオプションおよび --aclオプションを指定。 +- --aclオプションと--allオプションまたは--dbオプションを指定。 【例】 @@ -514,6 +544,10 @@ $ gs_export -c c002 c001 -u admin/admin --silent $ gs_import --all --acl -u admin/admin ``` +``` example +$ gs_import --db db001 --acl -u admin/admin +``` + 【メモ】 - 管理ユーザで実行する必要があります。 - クラスタ上のすべてのデータを移行する場合にご利用ください。移行先にはデータベース・一般ユーザが存在しない状態で実行してください。 @@ -657,11 +691,51 @@ $ gs_import --all -u admin/admin -d /data/expdata --force 【メモ】 - エラーが発生したコレクションは、コンテナデータファイル修正後、コンテナ置き換えオプション(--replace)を指定して再登録してください。 +- --forceオプションを指定していない場合、エラーが発生した時点で、インポート処理を終了します。 + +【例】 + +``` example +$ gs_import --all -u admin/admin + +インポートを開始します +対象コンテナ数 : 6 + +Name PartitionId Row 
+------------------------------------------------------------------ +public.container_2 15 10 +public.container_3 25 20 +[エラーが発生] + +対象コンテナ数 : 6 ( 成功:2 失敗:1 未処理:3 ) + +インポートを終了しました +``` + +【注意】 +- --parallelオプションの指定時にエラーが発生しても、インポート処理がすぐに終了しない場合があります。 + ### その他の機能 **動作表示の詳細指定** - --verboseオプションを指定することで、処理の詳細を表示することができます。 +**進捗状況の出力指定** +- --progressオプションと値を指定することで、importで登録したロウ数を指定した値の間隔でログ出力することができます。 + +【例】 + +``` example +$ gs_import --all -u admin/admin --progress 10000 + +[gs_importのログ] +container_1: 10000 rows imported. +container_1: 20000 rows imported. +container_1: 30000 rows imported. + : +   : +   : +``` ## コマンド/オプション仕様 @@ -690,7 +764,7 @@ $ gs_import --all -u admin/admin -d /data/expdata --force | --filterfile 定義ファイル名 | | ロウを取り出す検索クエリを記述した定義ファイルを指定します。省略した場合は、すべてのロウがエクスポートされます。 | | --intervals YYYYMMdd:YYYYMMdd | | エクスポート対象のコンテナが日付蓄積型コンテナの場合に、エクスポートで取り出すロウの期間を指定します。指定方法は「YYYYMMdd:YYYYMMdd」フォーマットで左に開始日、右に終了日を指定します。省略した場合は、すべてのロウがエクスポートされます。filterfileオプションと同時に指定することはできません。 | | --parallel 並列実行数 | | 指定された数で並列実行を行います。並列実行を行うと、エクスポートデータは並列実行数と同じ数で分割されます。マルチコンテナ形式の場合(--outオプションを指定した場合)のみ指定できます。指定範囲は、2から32までです。 | - | --acl | | データベース、ユーザ、アクセス権の情報もエクスポートします。管理者ユーザで、かつ --allオプションを指定している場合のみ指定できます。 | + | --acl | | データベース、ユーザ、アクセス権の情報もエクスポートします。管理者ユーザで、かつ --allオプションまたは--dbオプションを指定している場合のみ指定できます。 | | --prefixdb データベース名 | | --containerオプションを指定した場合に、コンテナのデータベース名を指定します。省略した場合は、デフォルトデータベースのコンテナが処理対象になります。 | | --force | | エラーが発生しても処理を継続します。エラー内容は処理終了後に一覧表示されます。 | | -t|--test | | テストモードでツールを実行します。 | @@ -713,7 +787,7 @@ $ gs_import --all -u admin/admin -d /data/expdata --force | コマンド | オプション/引数 | |---------------------|--------------------------------------| - | gs_import | -u|--user ユーザ名/パスワード
--all | --db データベース名 \[データベース名\] | ( --container コンテナ名 \[コンテナ名\] … | --containerregex 正規表現 \[正規表現\] …)
--db データベース名 \[データベース名\]
\[--append|--replace\]
\[-d|--directory インポート対象ディレクトリパス\]
\[-f|--file ファイル名 \[ファイル名…\]\]
\[--intervals YYYYMMdd:YYYYMMdd\]
\[--count コミット数\]
\[--acl\]
\[--prefixdb データベース名\]
\[--force\]
\[--schemaCheckSkip\]
\[-v|--verbose\]
\[--silent\]  | + | gs_import | -u|--user ユーザ名/パスワード
--all | --db データベース名 \[データベース名\] | ( --container コンテナ名 \[コンテナ名\] … | --containerregex 正規表現 \[正規表現\] …)
--db データベース名 \[データベース名\]
\[--append|--replace\]
\[-d|--directory インポート対象ディレクトリパス\]
\[-f|--file ファイル名 \[ファイル名…\]\]
\[--intervals YYYYMMdd:YYYYMMdd\]
\[--count コミット数\]
\[--progress 出力間隔\]
\[--acl\]
\[--prefixdb データベース名\]
\[--force\]
\[--schemaCheckSkip\]
\[-v|--verbose\]
\[--silent\]  | | gs_import | -l|--list
\[-d|--directory ディレクトリパス\]
\[-f|--file ファイル名 \[ファイル名…\]\] | | gs_import | --version | | gs_import | \[-h|--help\] | @@ -733,7 +807,8 @@ $ gs_import --all -u admin/admin -d /data/expdata --force | -f|--file ファイル名 \[ファイル名…\] | | インポート対象となるコンテナデータファイルを指定します。複数指定可能です。省略時は-d(--directory)で指定したディレクトリまたはカレントディレクトリのすべてのコンテナデータファイルを対象とします。 | | --intervals YYYYMMdd:YYYYMMdd | | インポート対象となるコンテナが日付蓄積型コンテナの場合に、インポートで取り出すロウデータファイルの期間を指定します。指定方法は「YYYYMMdd:YYYYMMdd」フォーマットで左に開始日、右に終了日を指定します。省略した場合は、すべてのロウデータファイルがインポートされます。 | | --count コミット数 | | 入力データを一括コミットするまでの入力件数を指定します。 | - | --acl | | データベース、ユーザ、アクセス権の情報もインポートします。--aclオプションを指定してエクスポートしたデータに対して、管理者ユーザで、かつ --allオプションを指定している場合のみ指定できます。 | + | --progress 出力間隔 | | 登録したロウ数をログ出力する間隔を指定します。 | + | --acl | | データベース、ユーザ、アクセス権の情報もインポートします。--aclオプションを指定してエクスポートしたデータに対して、管理者ユーザで、かつ --allオプションまたは--dbオプションを指定している場合のみ指定できます。 | | --prefixdb データベース名 | | --containerオプションを指定した場合に、コンテナのデータベース名を指定します。省略した場合は、デフォルトデータベースのコンテナが処理対象になります。 | | --force | | エラーが発生しても処理を継続します。エラー内容は処理終了後に一覧表示されます。 | | --schemaCheckSkip | | --appendオプションを指定した場合に、既存コンテナとのスキーマチェックを行いません。 | @@ -806,6 +881,9 @@ $ gs_import --all -u admin/admin -d /data/expdata --force | timeIntervalInfo | タイムインターバル情報 | 日付蓄積型コンテナの場合は、以下の情報を配列形式で記述します | 任意 | | containerFile | コンテナデータファイル名 | ファイル名 | timeIntervalInfoを記載した場合は必須 | | boundaryValue | 期間 | コンテナデータの開始の日付 | timeIntervalInfoを記載した場合は必須 | +| データパーティション配置情報 | | | | +| intervalWorkerGroup | 区間グループ番号 | データパーティション配置を決定するグループ番号 | 任意 | +| intervalWorkerGroupPosition | 区間グループノード補正値 | 区間グループで決定したデータパーティションの処理ノードを補正する値 | 任意 | - \*1 : V4.2以前のメタデータファイルに出力される情報です。V4.3以降ではrowKeySetを使用してください。 - \*2 : containerTypeがTIME_SERIESかつrowKeyAssignedがfalseである場合は必須です。 @@ -962,6 +1040,48 @@ $ gs_import --all -u admin/admin -d /data/expdata --force ] ``` +【メモ】 +- 区間グループ番号、区間グループノード補正値をユーザが配置指定した場合、メタ情報がjsonに記述されます。 + +【例1】 データパーティション配置の記述例 + +- 区間グループ番号を1、区間グループノード補正値を1として、ユーザがデータパーティション配置指定した場合 + + ``` example + { + "version":"5.4.0", + "database":"public", + "container":"c001", + "containerType":"COLLECTION", + "containerFileType":"csv", + "partitionNo":1, + "columnSet":[ + { "columnName":"dt", "type":"TIMESTAMP", "notNull":true }, + { "columnName":"val", "type":"LONG", "notNull":true } + ], + "intervalWorkerGroup":1, + "intervalWorkerGroupPosition":1, + "rowKeySet":[ + "dt" + ], + "indexSet":[ + { + "columnNames":[ + "dt" + ], + "type":"TREE", + "indexName":null + } + ], + "tablePartitionInfo":{ + "type":"INTERVAL", + "column":"dt", + "intervalValue":"1", + "intervalUnit":"DAY" + } + } + ``` + ### ロウデータファイル(バイナリデータファイル) ロウデータファイル(バイナリデータファイル)はzip形式であり、gs_exportでのみ作成が可能です。可読性はなく、編集もできません。 From 8442675539e56336cd32e44921fd5a3aa4b38b7c Mon Sep 17 00:00:00 2001 From: knonomura Date: Tue, 13 Feb 2024 10:32:27 +0900 Subject: [PATCH 5/5] Update for V5.5 --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index fdb71f7..24bc00f 100644 --- a/README.md +++ b/README.md @@ -10,9 +10,9 @@ Building and program execution are checked in the environment below. OS: CentOS 7.9 (x64) Java: OpenJDK 1.8.0 - GridDB Server: V4.6.1/V5.5.0 CE (Community Edition) - GridDB Java Client: V4.6.1/V5.5.0 CE (Community Edition) - GridDB JDBC: V4.6.0/V5.5.0 CE (Community Edition) + GridDB Server: V5.5 CE (Community Edition) + GridDB Java Client: 5.5 CE (Community Edition) + GridDB JDBC: V5.5 CE (Community Edition) ## Quick start - Build and Run