From 6d32636759f2039c428e02f0ad8975fe4817de56 Mon Sep 17 00:00:00 2001 From: Bogdan Popescu <68062990+bopopescu@users.noreply.github.com> Date: Sun, 26 Jul 2020 12:21:45 +0300 Subject: [PATCH] Discarding the master-slave language --- .../bibencode/lib/bibencode_batch_engine.py | 88 ++++++------- modules/bibencode/lib/bibencode_config.py | 2 +- modules/bibfield/lib/bibfield.py | 16 +-- .../bibfield/lib/bibfield_config_engine.py | 50 ++++---- modules/bibfield/lib/bibfield_jsonreader.py | 4 +- modules/bibfield/lib/bibfield_marcreader.py | 4 +- .../lib/bibfield_marcreader_unit_tests.py | 2 +- modules/bibfield/lib/bibfield_reader.py | 12 +- .../bibfield/lib/bibfield_regression_tests.py | 10 +- modules/bibfield/lib/bibfield_utils.py | 4 +- modules/bibformat/lib/bibformat_bfx_engine.py | 2 +- modules/bibformat/lib/bibformat_dblayer.py | 16 +-- modules/bibindex/lib/bibindex_engine.py | 2 +- .../lib/bibindex_engine_stemmer_greek.py | 2 +- modules/bibmerge/lib/bibmerge_engine.py | 22 ++-- modules/bibmerge/lib/bibmerge_templates.py | 6 +- .../bibsched/lib/tasklets/bst_create_icons.py | 10 +- .../lib/docextract_record_regression_tests.py | 2 +- modules/miscutil/lib/dbdump.py | 120 +++++++++--------- modules/miscutil/lib/dbquery.py | 30 ++--- modules/miscutil/lib/hepdatadisplayutils.py | 20 +-- modules/miscutil/lib/hepdatautils.py | 4 +- modules/miscutil/lib/inveniocfg_upgrader.py | 2 +- ..._11_04_circulation_and_linkback_updates.py | 4 +- ...2012_11_21_aiduserinputlog_userid_check.py | 2 +- ...invenio_2013_01_12_bibrec_master_format.py | 6 +- .../lib/upgrades/invenio_release_1_1_0.py | 12 +- modules/webaccess/lib/access_control_admin.py | 24 ++-- .../webaccess/lib/access_control_firerole.py | 4 +- .../lib/access_control_mailcookie.py | 4 +- .../lib/external_authentication_cern.py | 4 +- .../lib/external_authentication_sso.py | 4 +- modules/websearch/lib/search_engine.py | 2 +- modules/websearch/lib/websearch_webcoll.py | 2 +- modules/websession/lib/webuser.py | 10 +- modules/webstyle/lib/webinterface_handler.py | 2 +- .../lib/functions/Video_Processing.py | 4 +- .../websubmit/lib/websubmitadmin_engine.py | 4 +- modules/websubmit/lib/websubmitadmincli.py | 4 +- 39 files changed, 261 insertions(+), 261 deletions(-) diff --git a/modules/bibencode/lib/bibencode_batch_engine.py b/modules/bibencode/lib/bibencode_batch_engine.py index 994df9d4b..fb8fa354a 100644 --- a/modules/bibencode/lib/bibencode_batch_engine.py +++ b/modules/bibencode/lib/bibencode_batch_engine.py @@ -204,7 +204,7 @@ def _task_write_message(message): def clean_job_for_quality(batch_job_dict, fallback=True): """ - Removes jobs from the batch description that are not suitable for the master + Removes jobs from the batch description that are not suitable for the main video's quality. It applies only for encoding jobs! 
    @param batch_job_dict: the dict containing the batch description
    @type batch_job_dict: dict
@@ -344,7 +344,7 @@ def san_bitrate(bitrate):
         else:
             raise Exception("Could not parse bitrate")
 
-    if not getval(batch_job, 'update_from_master'):
+    if not getval(batch_job, 'update_from_main'):
         if not getval(batch_job, 'input'):
             raise Exception("No input file in batch description")
 
@@ -354,11 +354,11 @@ def san_bitrate(bitrate):
     if not getval(batch_job, 'jobs'):
         raise Exception("No job list in batch description")
 
-    if getval(batch_job, 'update_from_master'):
-        if (not getval(batch_job, 'bibdoc_master_comment') and
-            not getval(batch_job, 'bibdoc_master_description') and
-            not getval(batch_job, 'bibdoc_master_subformat')):
-            raise Exception("If update_from_master ist set, a comment or"
+    if getval(batch_job, 'update_from_main'):
+        if (not getval(batch_job, 'bibdoc_main_comment') and
+            not getval(batch_job, 'bibdoc_main_description') and
+            not getval(batch_job, 'bibdoc_main_subformat')):
+            raise Exception("If update_from_main is set, a comment or"
                             " description or subformat for matching must be given")
 
     if getval(batch_job, 'marc_snippet'):
@@ -399,7 +399,7 @@ def upload_marcxml_file(marcxml):
     # GENERAL #
     #---------#
 
-    _task_write_message("----------- Handling Master -----------")
+    _task_write_message("----------- Handling Main -----------")
 
     ## Check the validity of the batch file here
     batch_job = json_decode_file(batch_job_file)
@@ -417,9 +417,9 @@ def upload_marcxml_file(marcxml):
     # UPDATE FROM MASTER #
     #--------------------#
 
-    ## We want to add new stuff to the video's record, using the master as input
-    if getval(batch_job, 'update_from_master'):
-        found_master = False
+    ## We want to add new stuff to the video's record, using the main video as input
+    if getval(batch_job, 'update_from_main'):
+        found_main = False
         bibdocs = recdoc.list_bibdocs()
         for bibdoc in bibdocs:
             bibdocfiles = bibdoc.list_all_files()
             for bibdocfile in bibdocfiles:
                 comment = bibdocfile.get_comment()
                 description = bibdocfile.get_description()
                 subformat = bibdocfile.get_subformat()
-                m_comment = getval(batch_job, 'bibdoc_master_comment', comment)
-                m_description = getval(batch_job, 'bibdoc_master_description', description)
-                m_subformat = getval(batch_job, 'bibdoc_master_subformat', subformat)
+                m_comment = getval(batch_job, 'bibdoc_main_comment', comment)
+                m_description = getval(batch_job, 'bibdoc_main_description', description)
+                m_subformat = getval(batch_job, 'bibdoc_main_subformat', subformat)
                 if (comment == m_comment and
                     description == m_description and
                     subformat == m_subformat):
-                    found_master = True
+                    found_main = True
                     batch_job['input'] = bibdocfile.get_full_path()
                     ## Get the aspect of the from the record
                     try:
@@ -442,12 +442,12 @@ def upload_marcxml_file(marcxml):
                     except IndexError:
                         pass
                     break
-            if found_master:
-                break
-        if not found_master:
-            _task_write_message("Video master for record %d not found"
+            if found_main:
+                break
+        if not found_main:
+            _task_write_message("Main video for record %d not found"
                                 % batch_job['recid'])
-            task_update_progress("Video master for record %d not found"
+            task_update_progress("Main video for record %d not found"
                                 % batch_job['recid'])
             ## Maybe send an email?
            return 1
@@ -461,10 +461,10 @@ def upload_marcxml_file(marcxml):
 
     ## Generate the docname from the input filename's name or given name
     bibdoc_video_docname, bibdoc_video_extension = decompose_file(batch_job['input'])[1:]
-    if not bibdoc_video_extension or getval(batch_job, 'bibdoc_master_extension'):
-        bibdoc_video_extension = getval(batch_job, 'bibdoc_master_extension')
-    if getval(batch_job, 'bibdoc_master_docname'):
-        bibdoc_video_docname = getval(batch_job, 'bibdoc_master_docname')
+    if not bibdoc_video_extension or getval(batch_job, 'bibdoc_main_extension'):
+        bibdoc_video_extension = getval(batch_job, 'bibdoc_main_extension')
+    if getval(batch_job, 'bibdoc_main_docname'):
+        bibdoc_video_docname = getval(batch_job, 'bibdoc_main_docname')
 
     write_message("Creating BibDoc for %s" % bibdoc_video_docname)
 
     ## If the bibdoc exists, receive it
@@ -480,28 +480,28 @@ def upload_marcxml_file(marcxml):
     #--------#
     # MASTER #
     #--------#
-    if not getval(batch_job, 'update_from_master'):
-        if getval(batch_job, 'add_master'):
-            ## Generate the right name for the master
-            ## The master should be hidden first an then renamed
+    if not getval(batch_job, 'update_from_main'):
+        if getval(batch_job, 'add_main'):
+            ## Generate the right name for the main
+            ## The main should be hidden first and then renamed
             ## when it is really available
             ## !!! FIX !!!
-            _task_write_message("Adding %s master to the BibDoc"
+            _task_write_message("Adding %s main to the BibDoc"
                                 % bibdoc_video_docname)
-            master_format = compose_format(
+            main_format = compose_format(
                                     bibdoc_video_extension,
-                                    getval(batch_job, 'bibdoc_master_subformat', 'master')
+                                    getval(batch_job, 'bibdoc_main_subformat', 'main')
                                     )
             ## If a file of the same format is there, something is wrong, remove it!
             ## it might be caused by a previous corrupted submission etc.
-            if bibdoc_video.format_already_exists_p(master_format):
-                bibdoc_video.delete_file(master_format, 1)
+            if bibdoc_video.format_already_exists_p(main_format):
+                bibdoc_video.delete_file(main_format, 1)
             bibdoc_video.add_file_new_format(
                     batch_job['input'],
                     version=1,
-                    description=getval(batch_job, 'bibdoc_master_description'),
-                    comment=getval(batch_job, 'bibdoc_master_comment'),
-                    docformat=master_format
+                    description=getval(batch_job, 'bibdoc_main_description'),
+                    comment=getval(batch_job, 'bibdoc_main_comment'),
+                    docformat=main_format
                     )
 
     #-----------#
@@ -516,9 +516,9 @@ def upload_marcxml_file(marcxml):
         _task_write_message("----------- Job %s of %s -----------"
                             % (_BATCH_STEP, _BATCH_STEPS))
 
-        ## Try to substitute docname with master docname
+        ## Try to substitute docname with main docname
         if getval(job, 'bibdoc_docname'):
-            job['bibdoc_docname'] = Template(job['bibdoc_docname']).safe_substitute({'bibdoc_master_docname': bibdoc_video_docname})
+            job['bibdoc_docname'] = Template(job['bibdoc_docname']).safe_substitute({'bibdoc_main_docname': bibdoc_video_docname})
 
         #-------------#
         # TRANSCODING #
@@ -541,15 +541,15 @@ def upload_marcxml_file(marcxml):
                 raise Exception("No container/extension defined")
             ## Get the docname and subformat
             bibdoc_video_subformat = getval(job, 'bibdoc_subformat')
-            bibdoc_slave_video_docname = getval(job, 'bibdoc_docname', bibdoc_video_docname)
+            bibdoc_subordinate_video_docname = getval(job, 'bibdoc_docname', bibdoc_video_docname)
             ## The subformat is incompatible with ffmpegs name convention
             ## We do the encoding without and rename it afterwards
             bibdoc_video_fullpath = compose_file(
                                                  bibdoc_video_directory,
-                                                 bibdoc_slave_video_docname,
+                                                 bibdoc_subordinate_video_docname,
                                                  bibdoc_video_extension
                                                  )
-            _task_write_message("Transcoding %s to %s;%s" % (bibdoc_slave_video_docname,
+            _task_write_message("Transcoding %s to %s;%s" % (bibdoc_subordinate_video_docname,
                                 bibdoc_video_extension,
                                 bibdoc_video_subformat))
             ## We encode now directly into the bibdocs directory
@@ -582,7 +582,7 @@ def upload_marcxml_file(marcxml):
                                     bibdoc_video_extension,
                                     bibdoc_video_subformat,
                                     1,
-                                    bibdoc_slave_video_docname)
+                                    bibdoc_subordinate_video_docname)
                                 )
             #bibdoc_video._build_file_list()
             bibdoc_video.touch()
@@ -653,7 +653,7 @@ def upload_marcxml_file(marcxml):
                 fname = os.path.join(tmpdir, filename)
                 bibdoc_frame_format = compose_format(bibdoc_frame_extension, bibdoc_frame_subformat)
-                ## Same as with the master, if the format allready exists,
+                ## Same as with the main, if the format already exists,
                 ## override it, because something went wrong before
                 if bibdoc_frame.format_already_exists_p(bibdoc_frame_format):
                     bibdoc_frame.delete_file(bibdoc_frame_format, 1)
@@ -698,8 +698,8 @@ def upload_marcxml_file(marcxml):
     # ADD MASTER METADATA #
     #---------------------#
 
-    if getval(batch_job, 'add_master_metadata'):
-        _task_write_message("Adding master metadata")
+    if getval(batch_job, 'add_main_metadata'):
+        _task_write_message("Adding main metadata")
         pbcore = pbcore_metadata(input_file = getval(batch_job, 'input'),
                                  pbcoreIdentifier = batch_job['recid'],
                                  aspect_override = getval(batch_job, 'aspect'))
diff --git a/modules/bibencode/lib/bibencode_config.py b/modules/bibencode/lib/bibencode_config.py
index 670b7aefc..bfee2cb20 100644
--- a/modules/bibencode/lib/bibencode_config.py
+++ b/modules/bibencode/lib/bibencode_config.py
@@ -122,7 +122,7 @@ CFG_BIBENCODE_FFMPEG_RE_VIDEOINFO_ASTREAM = re.compile("^\s*Stream #(\d+.\d+)\(?(\w+)?\)?: Audio: ([a-zA-Z0-9\(\) ]*), (\d+) Hz, ([a-zA-Z0-9 ]+), (\w+), (\d+) kb\/s$")
 
 ## FFMPEG command for setting metadata
-## This will create a copy of the master and write the metadata there
+## This will create a copy of the main and write the metadata there
 CFG_BIBENCODE_FFMPEG_METADATA_SET_COMMAND = "ffmpeg -y -i %s -acodec copy -vcodec copy %s"
 
 ## FFMPEG metadata argument template
diff --git a/modules/bibfield/lib/bibfield.py b/modules/bibfield/lib/bibfield.py
index 051940df9..c585471e3 100644
--- a/modules/bibfield/lib/bibfield.py
+++ b/modules/bibfield/lib/bibfield.py
@@ -61,7 +61,7 @@ def plugin_builder(plugin_name, plugin_code):
 
 
-def create_record(blob, master_format='marc', verbose=0, **additional_info):
+def create_record(blob, main_format='marc', verbose=0, **additional_info):
     """
     Creates a record object from the blob description using the apropiate reader
     for it.
 
     @return Record object
     """
-    reader = CFG_BIBFIELD_READERS['bibfield_%sreader.py' % (master_format,)](blob, **additional_info)
+    reader = CFG_BIBFIELD_READERS['bibfield_%sreader.py' % (main_format,)](blob, **additional_info)
 
     return Record(reader.translate())
 
 
-def create_records(blob, master_format='marc', verbose=0, **additional_info):
+def create_records(blob, main_format='marc', verbose=0, **additional_info):
     """
     Creates a list of records from the blod descriptions using the split_records
     function to divide then.
 
     @return List of record objects initiated by the functions create_record()
     """
-    record_blods = CFG_BIBFIELD_READERS['bibfield_%sreader.py' % (master_format,)].split_blob(blob, additional_info.get('schema', None))
+    record_blobs = CFG_BIBFIELD_READERS['bibfield_%sreader.py' % (main_format,)].split_blob(blob, additional_info.get('schema', None))
 
-    return [create_record(record_blob, master_format, verbose=verbose, **additional_info) for record_blob in record_blods]
+    return [create_record(record_blob, main_format, verbose=verbose, **additional_info) for record_blob in record_blobs]
 
 
 def get_record(recid, reset_cache=False, fields=()):
@@ -113,18 +113,19 @@ def get_record(recid, reset_cache=False, fields=()):
     #Then retrieve information and blob
     if not record or reset_cache:
         try:
-            master_format = run_sql("SELECT master_format FROM bibrec WHERE id=%s", (recid,))[0][0]
+            # NB: master_format is still the bibrec column name in the DB schema.
+            main_format = run_sql("SELECT master_format FROM bibrec WHERE id=%s", (recid,))[0][0]
         except:
             return None
         schema = 'xml'
-        master_format = 'marc'
+        main_format = 'marc'
         try:
             from invenio.search_engine import print_record
             blob = print_record(recid, format='xm')
         except:
             return None
-        reader = CFG_BIBFIELD_READERS['bibfield_%sreader.py' % (master_format,)](blob, schema=schema)
+        reader = CFG_BIBFIELD_READERS['bibfield_%sreader.py' % (main_format,)](blob, schema=schema)
         record = Record(reader.translate())
         #Update bibfmt for future uses
         run_sql("REPLACE INTO bibfmt(id_bibrec, format, last_updated, value) VALUES (%s, 'recjson', NOW(), %s)",
diff --git a/modules/bibfield/lib/bibfield_config_engine.py b/modules/bibfield/lib/bibfield_config_engine.py
index da151f1af..18b4f60c3 100644
--- a/modules/bibfield/lib/bibfield_config_engine.py
+++ b/modules/bibfield/lib/bibfield_config_engine.py
@@ -59,14 +59,14 @@ def _create_field_parser():
         calculated ::= "calculated:" INDENT derived_calculated_body UNDENT
         derived_calculated_body ::= [decorators] "," python_allowed_exp
 
-        decorators ::= (peristent_identfier | legacy | do_not_cache | parse_first |
depends_on | only_if | only_if_master_value)* + decorators ::= (peristent_identfier | legacy | do_not_cache | parse_first | depends_on | only_if | only_if_main_value)* peristent_identfier ::= @persitent_identifier( level ) legacy ::= "@legacy(" correspondences+ ")" correspondences ::= "(" source_tag [ "," tag_name ] "," json_id ")" parse_first ::= "@parse_first(" jsonid+ ")" depends_on ::= "@depends_on(" json_id+ ")" only_if ::= "@only_if(" python_condition+ ")" - only_if_master_value ::= "@only_if_master_value(" python_condition+ ")" + only_if_main_value ::= "@only_if_main_value(" python_condition+ ")" inherit_from ::= "@inherit_from()" @@ -129,9 +129,9 @@ def do_unindent(): .setResultsName("legacy", listAllMatches=True) only_if = (Suppress("@only_if") + originalTextFor(nestedExpr("(", ")")))\ .setResultsName("only_if") - only_if_master_value = (Suppress("@only_if_value") + \ + only_if_main_value = (Suppress("@only_if_value") + \ originalTextFor(nestedExpr("(", ")")))\ - .setResultsName("only_if_master_value") + .setResultsName("only_if_main_value") depends_on = (Suppress("@depends_on") + \ originalTextFor(nestedExpr("(", ")")))\ .setResultsName("depends_on") @@ -141,7 +141,7 @@ def do_unindent(): memoize = (Suppress("@memoize") + nestedExpr("(", ")"))\ .setResultsName("memoize") field_decorator = parse_first ^ depends_on ^ only_if ^ \ - only_if_master_value ^ memoize ^ legacy + only_if_main_value ^ memoize ^ legacy #Independent decorators inherit_from = (Suppress("@inherit_from") + \ @@ -151,9 +151,9 @@ def do_unindent(): .setResultsName("override") extend = (Suppress("@") + "extend")\ .setResultsName("extend") - master_format = (Suppress("@master_format") + \ + main_format = (Suppress("@main_format") + \ originalTextFor(nestedExpr("(", ")")))\ - .setResultsName("master_format") \ + .setResultsName("main_format") \ .setParseAction(lambda toks: toks[0]) derived_calculated_body = (ZeroOrMore(field_decorator) + python_allowed_expr)\ @@ -190,7 +190,7 @@ def do_unindent(): .setResultsName('json_ext') #Checker - checker_function = (Optional(master_format) + ZeroOrMore(ident + ".") + ident + originalTextFor(nestedExpr('(', ')')))\ + checker_function = (Optional(main_format) + ZeroOrMore(ident + ".") + ident + originalTextFor(nestedExpr('(', ')')))\ .setResultsName("checker", listAllMatches=True) checker = ("checker" + Suppress(":") + INDENT + OneOrMore(checker_function) + UNDENT) @@ -246,7 +246,7 @@ class BibFieldParser(object): """Dictionary containing all the rules needed to create and validate json fields""" _legacy_field_matchings = {} - """Dictionary containing matching between the legacy master format and the current json""" + """Dictionary containing matching between the legacy main format and the current json""" def __init__(self, base_dir=CFG_ETCDIR + '/bibfield', @@ -351,8 +351,8 @@ def _create_rule(self, rule, override=False, extend=False): extend: True/False, aliases: [], persistent_identifier: num/None, - rules: {'master_format_1': [{rule1}, {rule2}, ...], - 'master_format_2': [....], + rules: {'main_format_1': [{rule1}, {rule2}, ...], + 'main_format_2': [....], ...... 
'calculated': [....], 'derived': [...]} @@ -365,9 +365,9 @@ def _create_rule(self, rule, override=False, extend=False): 'parse_first' : (parse_first_json_ids), 'depends_on' : (depends_on_json_id), 'only_if' : (only_if_boolean_expressions), - 'only_if_master_value': (only_if_master_value_boolean_expressions), + 'only_if_main_value': (only_if_main_value_boolean_expressions), 'memoize' : time, - 'value' : value coming from master format + 'value' : value coming from main format } """ @@ -427,7 +427,7 @@ def _create_rule(self, rule, override=False, extend=False): if source not in rules: #Allow several tags point to the same json id rules[source] = [] - (depends_on, only_if, only_if_master_value, + (depends_on, only_if, only_if_main_value, parse_first, memoize) = self.__create_decorators_content(r) self._create_legacy_rules(r.legacy, json_id, source) @@ -435,7 +435,7 @@ def _create_rule(self, rule, override=False, extend=False): 'parse_first' : parse_first, 'depends_on' : depends_on, 'only_if' : only_if, - 'only_if_master_value': only_if_master_value, + 'only_if_main_value': only_if_main_value, 'memoize' : memoize, 'value' : compile(r.value[0].strip(), '', 'eval'), }) @@ -508,15 +508,15 @@ def __create_checker(self, rule): checkers = [] for checker in rule.checker: - if checker.master_format: - master_format = eval(rule.master_format) + if checker.main_format: + main_format = eval(rule.main_format) checker_function_name = checker[1] arguments = checker[2][1:-1] else: - master_format = ('all',) + main_format = ('all',) checker_function_name = checker[0] arguments = checker[1][1:-1] - checkers.append((master_format, checker_function_name, arguments)) + checkers.append((main_format, checker_function_name, arguments)) self.__class__._field_definitions[json_id]['checker'] = checkers @@ -551,14 +551,14 @@ def __create_decorators_content(self, rule): """ Extracts from the rule all the possible decorators. """ - depends_on = only_if = only_if_master_value = parse_first = memoize = None + depends_on = only_if = only_if_main_value = parse_first = memoize = None if rule.depends_on: depends_on = rule.depends_on[0] if rule.only_if: only_if = rule.only_if[0] - if rule.only_if_master_value: - only_if_master_value = rule.only_if_master_value[0] + if rule.only_if_main_value: + only_if_main_value = rule.only_if_main_value[0] if rule.parse_first: parse_first = rule.parse_first[0] if rule.memoize: @@ -567,7 +567,7 @@ def __create_decorators_content(self, rule): except IndexError: memoize = 300 # FIXME: Default value will be used - return (depends_on, only_if, only_if_master_value, parse_first, memoize) + return (depends_on, only_if, only_if_main_value, parse_first, memoize) def __resolve_inherit_rules(self): """ @@ -619,7 +619,7 @@ def __resolve_extend_rules(self): self._create_rule(rule, extend=True) -def guess_legacy_field_names(fields, master_format): +def guess_legacy_field_names(fields, main_format): """ Using the legacy rules written in the config file (@legacy) tries to find the equivalent json field for one or more legacy fields. 
@@ -632,7 +632,7 @@ def guess_legacy_field_names(fields, main_format):
         fields = (fields, )
     for field in fields:
         try:
-            res[field] = BibFieldParser.legacy_field_matchings()[master_format].get(field, [])
+            res[field] = BibFieldParser.legacy_field_matchings()[main_format].get(field, [])
         except:
             res[field] = []
     return res
diff --git a/modules/bibfield/lib/bibfield_jsonreader.py b/modules/bibfield/lib/bibfield_jsonreader.py
index 8536678c0..60f22c6d1 100644
--- a/modules/bibfield/lib/bibfield_jsonreader.py
+++ b/modules/bibfield/lib/bibfield_jsonreader.py
@@ -24,14 +24,14 @@ class JsonReader(Reader):
     """Default reader"""
 
-    __master_format__ = 'json'
+    __main_format__ = 'json'
 
     def __init__(self, blob, **kwargs):
         """
         :param blob:
         """
         super(JsonReader, self).__init__(blob=blob, **kwargs)
-        self._additional_info['master_format'] = 'json'
+        self._additional_info['main_format'] = 'json'
 
     @staticmethod
     def split_blob(blob, schema=None, **kwargs):
diff --git a/modules/bibfield/lib/bibfield_marcreader.py b/modules/bibfield/lib/bibfield_marcreader.py
index 796ba04d7..8b426d3e9 100644
--- a/modules/bibfield/lib/bibfield_marcreader.py
+++ b/modules/bibfield/lib/bibfield_marcreader.py
@@ -31,7 +31,7 @@ class MarcReader(Reader):
     """Marc reader"""
 
-    __master_format__ = 'marc'
+    __main_format__ = 'marc'
 
     split_marc = re.compile('<record.*?>.*?</record>', re.DOTALL)
 
@@ -40,7 +40,7 @@ def __init__(self, blob=None, **kwargs):
         """
         :param blob:
         """
         super(MarcReader, self).__init__(blob=blob, **kwargs)
-        self._additional_info['master_format'] = 'marc'
+        self._additional_info['main_format'] = 'marc'
 
     @staticmethod
     def split_blob(blob, schema=None, **kwargs):
diff --git a/modules/bibfield/lib/bibfield_marcreader_unit_tests.py b/modules/bibfield/lib/bibfield_marcreader_unit_tests.py
index a17a40eae..867049093 100644
--- a/modules/bibfield/lib/bibfield_marcreader_unit_tests.py
+++ b/modules/bibfield/lib/bibfield_marcreader_unit_tests.py
@@ -452,7 +452,7 @@ def test_rec_json_creation_from_marcxml(self):
         reader = MarcReader(blob=xml, schema="xml")
         r = Record(reader.translate())
-        self.assertTrue(r['__meta_metadata__.__additional_info__.master_format'] == 'marc')
+        self.assertTrue(r['__meta_metadata__.__additional_info__.main_format'] == 'marc')
         self.assertTrue('authors' in r)
         self.assertTrue(r['authors[0].full_name'] == "Efstathiou, G P")
         self.assertTrue(len(r['authors']) == 5)
diff --git a/modules/bibfield/lib/bibfield_reader.py b/modules/bibfield/lib/bibfield_reader.py
index f7871c677..6c54483bd 100644
--- a/modules/bibfield/lib/bibfield_reader.py
+++ b/modules/bibfield/lib/bibfield_reader.py
@@ -79,7 +79,7 @@ def translate(self):
         It transforms the incoming blob into a json structure using the rules
         described into the field and model definitions.
To apply this rules it takes into account the type of the reader, which - in fact means the type of the source format or `master_format` + in fact means the type of the source format or `main_format` :return: Json structure (typically a dictionary) """ @@ -166,8 +166,8 @@ def set(self, json, field): .append("Adding a new field '%s' without definition" % (field)) try: - if self.json['__meta_metadata__']['__additional_info__']['master_format'] in rule['rules']: - rule_def = rule['rules'][self.json['__meta_metadata__']['__additional_info__']['master_format']][0] + if self.json['__meta_metadata__']['__additional_info__']['main_format'] in rule['rules']: + rule_def = rule['rules'][self.json['__meta_metadata__']['__additional_info__']['main_format']][0] rule_type = 'creator' elif 'derived' in rule['rules']: rule_def = rule['rules']['derived'][0] @@ -275,7 +275,7 @@ def _apply_rules(self, json_id, field_name, rule_def): """Tries to apply a 'creator' rule""" applied = False for rule in rule_def['rules'].get( - self.json['__meta_metadata__']['__additional_info__']['master_format'], []): + self.json['__meta_metadata__']['__additional_info__']['main_format'], []): elements = self._get_elements_from_blob(rule['source_tag']) if not elements: self._set_default_value(json_id, field_name) @@ -303,8 +303,8 @@ def _apply_rules(self, json_id, field_name, rule_def): element = (element, ) applied = False for e in element: - if rule['only_if_master_value'] and \ - not all(try_to_eval(rule['only_if_master_value'], self.functions, value=e, self=self.json)): + if rule['only_if_main_value'] and \ + not all(try_to_eval(rule['only_if_main_value'], self.functions, value=e, self=self.json)): applied = applied or False else: try: diff --git a/modules/bibfield/lib/bibfield_regression_tests.py b/modules/bibfield/lib/bibfield_regression_tests.py index d7231ec5a..d3884dbe8 100644 --- a/modules/bibfield/lib/bibfield_regression_tests.py +++ b/modules/bibfield/lib/bibfield_regression_tests.py @@ -110,7 +110,7 @@ def setUp(self): f = open(CFG_TMPDIR + '/demobibdata.xml', 'r') blob = f.read() f.close() - self.recs = [rec for rec in create_records(blob, master_format='marc', schema='xml')] + self.recs = [rec for rec in create_records(blob, main_format='marc', schema='xml')] def test_records_created(self): """ BibField - demo file how many records are created """ @@ -128,14 +128,14 @@ def test_create_record_with_collection_tag(self): """ - record = create_record(blob, master_format='marc', schema='xml') - record1 = create_records(blob, master_format='marc', schema='xml')[0] + record = create_record(blob, main_format='marc', schema='xml') + record1 = create_records(blob, main_format='marc', schema='xml')[0] self.assertEqual(record1.keys(), record.keys()) # def test_empty_collection(self): # """bibfield - empty collection""" # blob_error0 = """""" - # rec = create_record(blob_error0, master_format='marc', schema='xml') + # rec = create_record(blob_error0, main_format='marc', schema='xml') # self.assertTrue(rec.is_empty()) # records = create_records(blob_error0) # self.assertEqual(len(records), 0) @@ -234,7 +234,7 @@ def test_fft_url_tags(self): PICTURE """ - rec = create_record(marc_blob, master_format='marc', schema='xml') + rec = create_record(marc_blob, main_format='marc', schema='xml') self.assertTrue('fft' in rec) self.assertTrue(len(rec['fft']) == 1) self.assertTrue(rec['fft[0].path'] == "http://invenio-software.org/download/invenio-demo-site-files/6206002.jpg") diff --git a/modules/bibfield/lib/bibfield_utils.py 
b/modules/bibfield/lib/bibfield_utils.py index 93836dee8..3c0a47c2b 100644 --- a/modules/bibfield/lib/bibfield_utils.py +++ b/modules/bibfield/lib/bibfield_utils.py @@ -108,7 +108,7 @@ def __setitem__(self, key, value): self._dict_bson[key] = value else: from invenio.bibfield import CFG_BIBFIELD_READERS as readers - reader = readers['bibfield_%sreader.py' % (self['__meta_metadata__']['__additional_info__']['master_format'], )]() + reader = readers['bibfield_%sreader.py' % (self['__meta_metadata__']['__additional_info__']['main_format'], )]() reader.set(self, main_key) self._dict_bson[key] = value @@ -154,7 +154,7 @@ def check_record(self, reset=True): def check_rules(checker_functions, key): """docstring for check_rule""" for checker_function in checker_functions: - if 'all' in checker_function[0] or self['__meta_metadata__.__additional_info__.master_format'] in checker_function[0]: + if 'all' in checker_function[0] or self['__meta_metadata__.__additional_info__.main_format'] in checker_function[0]: try: try_to_eval("%s(self,'%s',%s)" % (checker_function[1], key, checker_function[2])) except InvenioBibFieldContinuableError, err: diff --git a/modules/bibformat/lib/bibformat_bfx_engine.py b/modules/bibformat/lib/bibformat_bfx_engine.py index f702fa38a..9aacd0563 100644 --- a/modules/bibformat/lib/bibformat_bfx_engine.py +++ b/modules/bibformat/lib/bibformat_bfx_engine.py @@ -894,7 +894,7 @@ def get_object(self, name): def build_object(self, name): ''' Build the object from the list of addresses - A slave function for get_object. + A subordinate function for get_object. ''' new_object = {} parent_name = self.memory[name]['parent']; diff --git a/modules/bibformat/lib/bibformat_dblayer.py b/modules/bibformat/lib/bibformat_dblayer.py index 439c04f75..afec97e9d 100644 --- a/modules/bibformat/lib/bibformat_dblayer.py +++ b/modules/bibformat/lib/bibformat_dblayer.py @@ -432,16 +432,16 @@ def get_preformatted_record(recID, of, decompress=zlib.decompress): @param decompress: the method used to decompress the preformatted record in database @return: formatted record as String, or None if not exist """ - # Decide whether to use DB slave: + # Decide whether to use DB subordinate: if of in ('xm', 'recstruct'): - run_on_slave = False # for master formats, use DB master + run_on_subordinate = False # for main formats, use DB main else: - run_on_slave = True # for other formats, we can use DB slave + run_on_subordinate = True # for other formats, we can use DB subordinate # Try to fetch preformatted record query = """SELECT value, needs_2nd_pass FROM bibfmt WHERE id_bibrec = %s AND format = %s""" params = (recID, of) - res = run_sql(query, params, run_on_slave=run_on_slave) + res = run_sql(query, params, run_on_subordinate=run_on_subordinate) if res: value = decompress(res[0][0]) needs_2nd_pass = bool(res[0][1]) @@ -462,14 +462,14 @@ def get_preformatted_record_date(recID, of): @param of: the output format code @return: the date of the last update of the cache, or None if not exist """ - # Decide whether to use DB slave: + # Decide whether to use DB subordinate: if of in ('xm', 'recstruct'): - run_on_slave = False # for master formats, use DB master + run_on_subordinate = False # for main formats, use DB main else: - run_on_slave = True # for other formats, we can use DB slave + run_on_subordinate = True # for other formats, we can use DB subordinate # Try to fetch preformatted record query = "SELECT last_updated FROM bibfmt WHERE id_bibrec='%s' AND format='%s'" % (recID, of) - res = run_sql(query, 
run_on_slave=run_on_slave)
+    res = run_sql(query, run_on_subordinate=run_on_subordinate)
     if res:
         # record 'recID' is formatted in 'of', so return it
         return "%s" % res[0][0]
diff --git a/modules/bibindex/lib/bibindex_engine.py b/modules/bibindex/lib/bibindex_engine.py
index 1a83a2f4c..d236199b2 100644
--- a/modules/bibindex/lib/bibindex_engine.py
+++ b/modules/bibindex/lib/bibindex_engine.py
@@ -1490,7 +1490,8 @@ def find_nonmarc_records(self, recID1, recID2):
            of nonMarc type"""
         marc = range(recID1, recID2 + 1)
         nonmarc = []
-        query = """SELECT id FROM %s WHERE master_format <> 'marc'
+        # NB: master_format is the physical bibrec column name, so the SQL keeps it.
+        query = """SELECT id FROM %s WHERE master_format <> 'marc'
                    AND id BETWEEN %%s AND %%s""" % "bibrec"
         res = run_sql(query, (recID1, recID2))
         if res:
diff --git a/modules/bibindex/lib/bibindex_engine_stemmer_greek.py b/modules/bibindex/lib/bibindex_engine_stemmer_greek.py
index 60cf1b60d..6466551fb 100644
--- a/modules/bibindex/lib/bibindex_engine_stemmer_greek.py
+++ b/modules/bibindex/lib/bibindex_engine_stemmer_greek.py
@@ -133,7 +133,7 @@ class GreekStemmer(object):
     implementation by Panos Kyriakakis (http://www.salix.gr/greek_stemmer)
     which in turn is based on a JavaScript implementation by Georgios Ntais
     (http://people.dsv.su.se/~hercules/greek_stemmer.gr.html)
-    for his master thesis "Development of a Stemmer for the Greek Language"
+    for his master's thesis "Development of a Stemmer for the Greek Language"
     (http://people.dsv.su.se/~hercules/papers/Ntais_greek_stemmer_thesis_final.pdf).
     """
diff --git a/modules/bibmerge/lib/bibmerge_engine.py b/modules/bibmerge/lib/bibmerge_engine.py
index a0da3f897..af00872a9 100644
--- a/modules/bibmerge/lib/bibmerge_engine.py
+++ b/modules/bibmerge/lib/bibmerge_engine.py
@@ -146,7 +146,7 @@ def perform_request_record(requestType, uid, data):
     if requestType == 'submit':
         if data.has_key('duplicate'):
             recid2 = data['duplicate']
-            record2 = _get_record_slave(recid2, result, 'recid', uid)
+            record2 = _get_record_subordinate(recid2, result, 'recid', uid)
             if result['resultCode'] != 0: #return in case of error
                 return result
             (errcode, message) = check_doi_status_after_merge(data["recID1"], data['duplicate'],
@@ -162,7 +162,7 @@
             record_add_field(record2, '980', ' ', ' ', '', [('c', 'DELETED')])
             # mark record2 as duplicate of record1
             record_add_field(record2, '970', ' ', ' ', '', [('d', str(recid1))])
-            # add recid of deleted record to master record
+            # add recid of deleted record to main record
             record_add_field(record1, '981', ' ', ' ', '', [('a', str(recid2))])
 
             # To ensure updates happen in order, use a seq id
@@ -210,7 +210,7 @@
         recid2 = data["recID2"]
         mode = data['record2Mode']
-        record2 = _get_record_slave(recid2, result, mode, uid)
+        record2 = _get_record_subordinate(recid2, result, mode, uid)
         if result['resultCode'] != 0: #if record not accessible return error information
             return result
@@ -261,7 +261,7 @@ def perform_request_update_record(requestType, uid, data):
     redo_list = []
     mode = data['record2Mode']
-    record2 = _get_record_slave(recid2, result, mode, uid)
+    record2 = _get_record_subordinate(recid2, result, mode, uid)
     if result['resultCode'] != 0: #if record not accessible return error information
         return result
@@ -342,7 +342,7 @@ def perform_small_request_update_record(requestType, uid, data):
     disabled_hp_changes = cache_content[4]
 
     mode = data['record2Mode']
-    record2 = _get_record_slave(recid2, result, mode, uid)
+    record2 = _get_record_subordinate(recid2, result, mode,
uid) if result['resultCode'] != 0: #if record not accessible return error information return result @@ -406,7 +406,7 @@ def _get_record(recid, uid, result, fresh_record=False): record_order_subfields(record) return record -def _get_record_slave(recid, result, mode=None, uid=None): +def _get_record_subordinate(recid, result, mode=None, uid=None): """Check if record exists and return it in dictionary format. If any kind of error occurs returns None. If mode=='revision' then recid parameter is considered as revid.""" @@ -495,13 +495,13 @@ def check_doi_status_after_merge(original_recid1, original_recid2, final_record1 by the system, and that not duplicate DOI would be created. Returns a tuple(error_code, message). - @param original_recid1: the record ID of the original record 1 (master) + @param original_recid1: the record ID of the original record 1 (main) @type original_recid1: int - @param original_recid2: the record ID of the original record 2 (slave) + @param original_recid2: the record ID of the original record 2 (subordinate) @type original_recid2: int @param final_record1: the resulting merged record @type final_record1: BibRecord object - @param final_record_2: the resulting slave "merged" record (optional when record2_marked_as_duplicate_p is False) + @param final_record_2: the resulting subordinate "merged" record (optional when record2_marked_as_duplicate_p is False) @type final_record_2: BibRecord object @param record2_marked_as_duplicate_p: True if the record 2 will be marked as duplicate (and deleted) @type record2_marked_as_duplicate_p: bool @@ -516,7 +516,7 @@ def check_doi_status_after_merge(original_recid1, original_recid2, final_record1 internal_only_p=True) original_record2_dois = get_dois(create_record(print_record(original_recid2, 'xm'))[0]) - # Are there any DOI from record 1 (master) lost in the merging? + # Are there any DOI from record 1 (main) lost in the merging? lost_dois_in_record1 = [doi for doi in original_record1_managed_dois \ if not doi in new_record1_managed_dois] @@ -528,7 +528,7 @@ def check_doi_status_after_merge(original_recid1, original_recid2, final_record1 new_record2_managed_dois = get_dois(final_record_2, internal_only_p=True) original_record2_managed_dois = get_dois(create_record(print_record(original_recid2, 'xm'))[0], internal_only_p=True) - # Are there any DOI from record 2 (slave) lost in the merging? + # Are there any DOI from record 2 (subordinate) lost in the merging? lost_dois_in_record2 = [doi for doi in original_record2_managed_dois \ if not doi in new_record1_managed_dois] else: diff --git a/modules/bibmerge/lib/bibmerge_templates.py b/modules/bibmerge/lib/bibmerge_templates.py index d7760f50c..42a74837e 100644 --- a/modules/bibmerge/lib/bibmerge_templates.py +++ b/modules/bibmerge/lib/bibmerge_templates.py @@ -117,10 +117,10 @@ def controlpanel(self, recid=None): Merge non-conflicting - Edit master record + Edit main record - Edit slave record + Edit subordinate record @@ -144,7 +144,7 @@ def controlpanel(self, recid=None): - delete slave record as duplicate + delete subordinate record as duplicate diff --git a/modules/bibsched/lib/tasklets/bst_create_icons.py b/modules/bibsched/lib/tasklets/bst_create_icons.py index 3b42eb78e..456d7800b 100644 --- a/modules/bibsched/lib/tasklets/bst_create_icons.py +++ b/modules/bibsched/lib/tasklets/bst_create_icons.py @@ -48,9 +48,9 @@ def create_icons_for_record(recid, icon_sizes, icon_format_mappings=None, @param icon_sizes: the list of icon sizes that need to be generated. 
Note that upscaled is not allowed
     @type icon_sizes: list
-    @param icon_format_mappings: defines for each "master" format in
+    @param icon_format_mappings: defines for each "main" format in
                                  which format the icons should be
-                                 created. If the master format is
+                                 created. If the main format is
                                  not specified here, then its icons
                                  will be created in the same format,
                                  if possible (for eg. the icons of a
@@ -157,9 +157,9 @@ def bst_create_icons(recid, icon_sizes, icon_format_mappings=None,
     @param collection: the collection name on which to run the task;
                        if recid is defined, collection will be ignored
     @type collection: string
-    @param icon_format_mappings: defines for each "master" format in
+    @param icon_format_mappings: defines for each "main" format in
                                  which format the icons should be
-                                 created. If the master format is
+                                 created. If the main format is
                                  not specified here, then its icons
                                  will be created in the same format,
                                  if possible (for eg. the icons of a
@@ -168,7 +168,7 @@ def bst_create_icons(recid, icon_sizes, icon_format_mappings=None,
                                  would be created in JPG) and unless a
                                  default mapping is not provided in
                                  C{CFG_ICON_CREATION_FORMAT_MAPPINGS}.
-                                 Use syntax masterextension-targetextension1,targetextension2
+                                 Use syntax mainextension-targetextension1,targetextension2
                                  (eg. "doc->png,jpg" or "png-jpg")
 
                                  Use '*' to target extensions not
                                  matched by other rules (if
diff --git a/modules/docextract/lib/docextract_record_regression_tests.py b/modules/docextract/lib/docextract_record_regression_tests.py
index df77f62c0..f035581ba 100644
--- a/modules/docextract/lib/docextract_record_regression_tests.py
+++ b/modules/docextract/lib/docextract_record_regression_tests.py
@@ -43,7 +43,8 @@ def order_by_tag(field1, field2):
         self.records_cache = {}
         self.xml_cache = {}
         for recid in perform_request_search(p=""):
-            r = run_sql("SELECT master_format FROM bibrec WHERE id=%s", [recid])
+            # NB: master_format is the bibrec column name in the DB schema.
+            r = run_sql("SELECT master_format FROM bibrec WHERE id=%s", [recid])
             self.assertTrue(r, msg="bibrec row for %s missing" % recid)
             if r[0][0] != 'marc':
                 continue
diff --git a/modules/miscutil/lib/dbdump.py b/modules/miscutil/lib/dbdump.py
index ea93200ff..c69691612 100644
--- a/modules/miscutil/lib/dbdump.py
+++ b/modules/miscutil/lib/dbdump.py
@@ -32,7 +32,7 @@
     CFG_DATABASE_NAME, \
     CFG_DATABASE_PORT, \
     CFG_DATABASE_SLAVE, \
-    get_connection_for_dump_on_slave, \
+    get_connection_for_dump_on_subordinate, \
     run_sql
 from invenio.bibtask import task_init, \
     write_message, \
@@ -66,55 +66,58 @@ def _delete_old_dumps(dirname, filename, number_to_keep):
             write_message("... deleting %s" % dirname + os.sep + afile)
             os.remove(dirname + os.sep + afile)
 
-def check_slave_is_up(connection=None):
-    """Raise an StandardError in case the slave is not correctly up."""
+def check_subordinate_is_up(connection=None):
+    """Raise a StandardError in case the subordinate is not correctly up."""
     if connection is None:
-        connection = get_connection_for_dump_on_slave()
+        connection = get_connection_for_dump_on_subordinate()
     res = run_sql("SHOW SLAVE STATUS", with_dict=True, connection=connection)
-    if res[0]['Slave_IO_Running'] != 'Yes':
-        raise StandardError("Slave_IO_Running is not set to 'Yes'")
-    if res[0]['Slave_SQL_Running'] != 'Yes':
-        raise StandardError("Slave_SQL_Running is not set to 'Yes'")
+    # NB: 'Slave_IO_Running' and 'Slave_SQL_Running' are the literal column
+    # names returned by MySQL's SHOW SLAVE STATUS; they cannot be renamed here.
+    if res[0]['Slave_IO_Running'] != 'Yes':
+        raise StandardError("Slave_IO_Running is not set to 'Yes'")
+    if res[0]['Slave_SQL_Running'] != 'Yes':
+        raise StandardError("Slave_SQL_Running is not set to 'Yes'")
 
-def check_slave_is_down(connection=None):
-    """Raise an StandardError in case the slave is not correctly down."""
+def check_subordinate_is_down(connection=None):
+    """Raise a StandardError in case the subordinate is not correctly down."""
     if connection is None:
-        connection = get_connection_for_dump_on_slave()
+        connection = get_connection_for_dump_on_subordinate()
     res = run_sql("SHOW SLAVE STATUS", with_dict=True, connection=connection)
-    if res[0]['Slave_SQL_Running'] != 'No':
-        raise StandardError("Slave_SQL_Running is not set to 'No'")
+    # NB: same remark as above about the server-defined column name.
+    if res[0]['Slave_SQL_Running'] != 'No':
+        raise StandardError("Slave_SQL_Running is not set to 'No'")
 
-def detach_slave(connection=None):
-    """Detach the slave."""
+def detach_subordinate(connection=None):
+    """Detach the subordinate."""
     if connection is None:
-        connection = get_connection_for_dump_on_slave()
+        connection = get_connection_for_dump_on_subordinate()
     run_sql("STOP SLAVE SQL_THREAD", connection=connection)
-    check_slave_is_down(connection)
+    check_subordinate_is_down(connection)
 
-def attach_slave(connection=None):
-    """Attach the slave."""
+def attach_subordinate(connection=None):
+    """Attach the subordinate."""
    if connection is None:
-        connection = get_connection_for_dump_on_slave()
+        connection = get_connection_for_dump_on_subordinate()
     run_sql("START SLAVE", connection=connection)
-    check_slave_is_up(connection)
+    check_subordinate_is_up(connection)
 
-def check_slave_is_in_consistent_state(connection=None):
+def check_subordinate_is_in_consistent_state(connection=None):
     """
-    Check if the slave is already aware that dbdump task is running.
+    Check if the subordinate is already aware that dbdump task is running.
     dbdump being a monotask, guarantee that no other task is currently
-    running and it's hence safe to detach the slave and start the
+    running and it's hence safe to detach the subordinate and start the
     actual dump.
     """
     if connection is None:
-        connection = get_connection_for_dump_on_slave()
+        connection = get_connection_for_dump_on_subordinate()
     i = 0
     ## Let's take the current status of dbdump (e.g. RUNNING, ABOUT TO STOP, etc.)...
     current_status = run_sql("SELECT status FROM schTASK WHERE id=%s", (task_get_task_param('task_id'), ))[0][0]
     while True:
         if i == 10:
             ## Timeout!!
-            raise StandardError("The slave seems not to pick up with the master")
-        ## ...and let's see if it matches with what the slave sees.
+            raise StandardError("The subordinate does not seem to catch up with the main")
+        ## ...and let's see if it matches with what the subordinate sees.
if run_sql("SELECT status FROM schTASK WHERE id=%s AND status=%s", (task_get_task_param('task_id'), current_status), connection=connection): ## Bingo! return @@ -232,15 +232,15 @@ def _dbdump_elaborate_submit_param(key, value, dummyopts, dummyargs): if not CFG_PATH_GZIP or (CFG_PATH_GZIP and not os.path.exists(CFG_PATH_GZIP)): raise StandardError("ERROR: No valid gzip path is defined.") task_set_option('compress', True) - elif key in ('-S', '--slave'): + elif key in ('-S', '--subordinate'): if value: - task_set_option('slave', value) + task_set_option('subordinate', value) else: if not CFG_DATABASE_SLAVE: - raise StandardError("ERROR: No slave defined.") - task_set_option('slave', CFG_DATABASE_SLAVE) - elif key in ('--dump-on-slave-helper', ): - task_set_option('dump_on_slave_helper_mode', True) + raise StandardError("ERROR: No subordinate defined.") + task_set_option('subordinate', CFG_DATABASE_SLAVE) + elif key in ('--dump-on-subordinate-helper', ): + task_set_option('dump_on_subordinate_helper_mode', True) elif key in ('--ignore-tables',): try: re.compile(value) @@ -264,16 +264,16 @@ def _dbdump_run_task_core(): port = CFG_DATABASE_PORT connection = None try: - if task_get_option('slave') and not task_get_option('dump_on_slave_helper_mode'): - connection = get_connection_for_dump_on_slave() - write_message("Dump on slave requested") - write_message("... checking if slave is well up...") - check_slave_is_up(connection) - write_message("... checking if slave is in consistent state...") - check_slave_is_in_consistent_state(connection) - write_message("... detaching slave database...") - detach_slave(connection) - write_message("... scheduling dump on slave helper...") + if task_get_option('subordinate') and not task_get_option('dump_on_subordinate_helper_mode'): + connection = get_connection_for_dump_on_subordinate() + write_message("Dump on subordinate requested") + write_message("... checking if subordinate is well up...") + check_subordinate_is_up(connection) + write_message("... checking if subordinate is in consistent state...") + check_subordinate_is_in_consistent_state(connection) + write_message("... detaching subordinate database...") + detach_subordinate(connection) + write_message("... scheduling dump on subordinate helper...") helper_arguments = [] if task_get_option("number"): helper_arguments += ["--number", str(task_get_option("number"))] @@ -285,18 +285,18 @@ def _dbdump_run_task_core(): helper_arguments += ["--ignore-tables", str(task_get_option("ignore_tables"))] if task_get_option("compress"): helper_arguments += ["--compress"] - if task_get_option("slave"): - helper_arguments += ["--slave", str(task_get_option("slave"))] - helper_arguments += ['-N', 'slavehelper', '--dump-on-slave-helper'] + if task_get_option("subordinate"): + helper_arguments += ["--subordinate", str(task_get_option("subordinate"))] + helper_arguments += ['-N', 'subordinatehelper', '--dump-on-subordinate-helper'] task_id = task_low_level_submission('dbdump', task_get_task_param('user'), '-P4', *helper_arguments) - write_message("Slave scheduled with ID %s" % task_id) + write_message("Subordinate scheduled with ID %s" % task_id) task_update_progress("DONE") return True - elif task_get_option('dump_on_slave_helper_mode'): - write_message("Dumping on slave mode") - connection = get_connection_for_dump_on_slave() - write_message("... 
checking if slave is well down...") - check_slave_is_down(connection) + elif task_get_option('dump_on_subordinate_helper_mode'): + write_message("Dumping on subordinate mode") + connection = get_connection_for_dump_on_subordinate() + write_message("... checking if subordinate is well down...") + check_subordinate_is_down(connection) host = CFG_DATABASE_SLAVE task_update_progress("Reading parameters") @@ -305,7 +305,7 @@ def _dbdump_run_task_core(): output_num = task_get_option('number', 5) params = task_get_option('params', None) compress = task_get_option('compress', False) - slave = task_get_option('slave', False) + subordinate = task_get_option('subordinate', False) ignore_tables = task_get_option('ignore_tables', None) if ignore_tables: ignore_tables = get_table_names(ignore_tables) @@ -322,8 +322,8 @@ def _dbdump_run_task_core(): task_update_progress("Dumping database") write_message("Database dump started") - if slave: - output_file_prefix = 'slave-%s-dbdump-' % (CFG_DATABASE_NAME,) + if subordinate: + output_file_prefix = 'subordinate-%s-dbdump-' % (CFG_DATABASE_NAME,) else: output_file_prefix = '%s-dbdump-' % (CFG_DATABASE_NAME,) output_file = output_file_prefix + output_file_suffix @@ -336,9 +336,9 @@ def _dbdump_run_task_core(): ignore_tables=ignore_tables) write_message("Database dump ended") finally: - if connection and task_get_option('dump_on_slave_helper_mode'): - write_message("Reattaching slave") - attach_slave(connection) + if connection and task_get_option('dump_on_subordinate_helper_mode'): + write_message("Reattaching subordinate") + attach_subordinate(connection) # prune old dump files: task_update_progress("Pruning old dump files") write_message("Pruning old dump files started") @@ -358,7 +358,7 @@ def main(): -n, --number=NUM Keep up to NUM previous dump files. [default=5] --params=PARAMS Specify your own mysqldump parameters. Optional. --compress Compress dump directly into gzip. - -S, --slave=HOST Perform the dump from a slave, if no host use CFG_DATABASE_SLAVE. + -S, --subordinate=HOST Perform the dump from a subordinate, if no host use CFG_DATABASE_SLAVE. --ignore-tables=regex Ignore tables matching the given regular expression Examples: @@ -366,7 +366,7 @@ def main(): $ dbdump -n3 -o/tmp -s1d -L 02:00-04:00 """ % CFG_LOGDIR, specific_params=("n:o:p:S:", - ["number=", "output=", "params=", "slave=", "compress", 'ignore-tables=', "dump-on-slave-helper"]), + ["number=", "output=", "params=", "subordinate=", "compress", 'ignore-tables=', "dump-on-subordinate-helper"]), task_submit_elaborate_specific_parameter_fnc=_dbdump_elaborate_submit_param, task_run_fnc=_dbdump_run_task_core) diff --git a/modules/miscutil/lib/dbquery.py b/modules/miscutil/lib/dbquery.py index aecd8a4da..5be598c7a 100644 --- a/modules/miscutil/lib/dbquery.py +++ b/modules/miscutil/lib/dbquery.py @@ -110,10 +110,10 @@ def _get_password_from_database_password_file(user): _DB_CONN[CFG_DATABASE_HOST] = {} _DB_CONN[CFG_DATABASE_SLAVE] = {} -def get_connection_for_dump_on_slave(): +def get_connection_for_dump_on_subordinate(): """ Return a valid connection, suitable to perform dump operation - on a slave node of choice. + on a subordinate node of choice. 
""" connection = connect(host=CFG_DATABASE_SLAVE, port=int(CFG_DATABASE_PORT), @@ -206,7 +206,7 @@ def close_connection(dbhost=CFG_DATABASE_HOST): except KeyError: pass -def run_sql(sql, param=None, n=0, with_desc=False, with_dict=False, run_on_slave=False, connection=None): +def run_sql(sql, param=None, n=0, with_desc=False, with_dict=False, run_on_subordinate=False, connection=None): """Run SQL on the server with PARAM and return result. @param param: tuple of string params to insert in the query (see notes below) @@ -247,7 +247,7 @@ def run_sql(sql, param=None, n=0, with_desc=False, with_dict=False, run_on_slave param = tuple(param) dbhost = CFG_DATABASE_HOST - if run_on_slave and CFG_DATABASE_SLAVE: + if run_on_subordinate and CFG_DATABASE_SLAVE: dbhost = CFG_DATABASE_SLAVE ### log_sql_query(dbhost, sql, param) ### UNCOMMENT ONLY IF you REALLY want to log all queries @@ -296,7 +296,7 @@ def run_sql(sql, param=None, n=0, with_desc=False, with_dict=False, run_on_slave rc = cur.lastrowid return rc -def run_sql_many(query, params, limit=CFG_MISCUTIL_SQL_RUN_SQL_MANY_LIMIT, run_on_slave=False): +def run_sql_many(query, params, limit=CFG_MISCUTIL_SQL_RUN_SQL_MANY_LIMIT, run_on_subordinate=False): """Run SQL on the server with PARAM. This method does executemany and is therefore more efficient than execute but it has sense only with queries that affect state of a database @@ -319,7 +319,7 @@ def run_sql_many(query, params, limit=CFG_MISCUTIL_SQL_RUN_SQL_MANY_LIMIT, run_o return dbhost = CFG_DATABASE_HOST - if run_on_slave and CFG_DATABASE_SLAVE: + if run_on_subordinate and CFG_DATABASE_SLAVE: dbhost = CFG_DATABASE_SLAVE i = 0 r = None @@ -348,7 +348,7 @@ def run_sql_many(query, params, limit=CFG_MISCUTIL_SQL_RUN_SQL_MANY_LIMIT, run_o i += limit return r -def run_sql_with_limit(query, param=None, n=0, with_desc=False, wildcard_limit=0, run_on_slave=False): +def run_sql_with_limit(query, param=None, n=0, with_desc=False, wildcard_limit=0, run_on_subordinate=False): """This function should be used in some cases, instead of run_sql function, in order to protect the db from queries that might take a log time to respond Ex: search queries like [a-z]+ ; cern*; a->z; @@ -360,9 +360,9 @@ def run_sql_with_limit(query, param=None, n=0, with_desc=False, wildcard_limit=0 except ValueError: raise if wildcard_limit < 1:#no limit on the wildcard queries - return run_sql(query, param, n, with_desc, run_on_slave=run_on_slave) + return run_sql(query, param, n, with_desc, run_on_subordinate=run_on_subordinate) safe_query = query + " limit %s" %wildcard_limit - res = run_sql(safe_query, param, n, with_desc, run_on_slave=run_on_slave) + res = run_sql(safe_query, param, n, with_desc, run_on_subordinate=run_on_subordinate) if len(res) == wildcard_limit: raise InvenioDbQueryWildcardLimitError(res) return res @@ -403,7 +403,7 @@ def log_sql_query(dbhost, sql, param=None): except: pass -def get_table_update_time(tablename, run_on_slave=False): +def get_table_update_time(tablename, run_on_subordinate=False): """Return update time of TABLENAME. TABLENAME can contain wildcard `%' in which case we return the maximum update time value. @@ -418,7 +418,7 @@ def get_table_update_time(tablename, run_on_slave=False): # SELECT UPDATE_TIME FROM INFORMATION_SCHEMA.TABLES WHERE # table_name='collection'. 
res = run_sql("SHOW TABLE STATUS LIKE %s", (tablename,), - run_on_slave=run_on_slave) + run_on_subordinate=run_on_subordinate) update_times = [] # store all update times for row in res: if type(row[10]) is long or \ @@ -433,14 +433,14 @@ def get_table_update_time(tablename, run_on_slave=False): update_times.append(str(row[11])) return max(update_times) -def get_table_status_info(tablename, run_on_slave=False): +def get_table_status_info(tablename, run_on_subordinate=False): """Return table status information on TABLENAME. Returned is a dict with keys like Name, Rows, Data_length, Max_data_length, etc. If TABLENAME does not exist, return empty dict. """ # Note: again a hack so that it works on all MySQL 4.0, 4.1, 5.0 res = run_sql("SHOW TABLE STATUS LIKE %s", (tablename,), - run_on_slave=run_on_slave) + run_on_subordinate=run_on_subordinate) table_status_info = {} # store all update times for row in res: if type(row[10]) is long or \ @@ -489,7 +489,7 @@ def wash_table_column_name(colname): raise Exception('The table column %s is not valid.' % repr(colname)) return colname -def real_escape_string(unescaped_string, run_on_slave=False): +def real_escape_string(unescaped_string, run_on_subordinate=False): """ Escapes special characters in the unescaped string for use in a DB query. @@ -500,7 +500,7 @@ def real_escape_string(unescaped_string, run_on_slave=False): @rtype: str """ dbhost = CFG_DATABASE_HOST - if run_on_slave and CFG_DATABASE_SLAVE: + if run_on_subordinate and CFG_DATABASE_SLAVE: dbhost = CFG_DATABASE_SLAVE connection_object = _db_login(dbhost) escaped_string = connection_object.escape_string(unescaped_string) diff --git a/modules/miscutil/lib/hepdatadisplayutils.py b/modules/miscutil/lib/hepdatadisplayutils.py index d397907ea..1ee995b9c 100644 --- a/modules/miscutil/lib/hepdatadisplayutils.py +++ b/modules/miscutil/lib/hepdatadisplayutils.py @@ -614,21 +614,21 @@ def render_hepdata_dataset_html(dataset, recid, seq, display_link=True): "data_layer_class" : ("hepdata_data_%i" % (seq, )), "plots_layer_class" : ("hepdata_plots_%i" % (seq, )), "data_expander_id" : ("hepdata_expander_%i" % (seq, )), - "masterplot_layer_class" : ("hepdata_masterplot_layer_%i" % (seq,)), - "masterplot_expander_id" : ("hepdata_masterplot_expander_%i" % (seq,)), + "mainplot_layer_class" : ("hepdata_mainplot_layer_%i" % (seq,)), + "mainplot_expander_id" : ("hepdata_mainplot_expander_%i" % (seq,)), "plots_rowspan": len(dataset.data), - "masterplot_rowspan": len(dataset.data_qualifiers) + 3 + "mainplot_rowspan": len(dataset.data_qualifiers) + 3 } - args["collapse_message_masterplot"] = "↓↓↓Hide↓↓↓" - args["expand_message_masterplot"] = "↑↑↑Plot↑↑↑" + args["collapse_message_mainplot"] = "↓↓↓Hide↓↓↓" + args["expand_message_mainplot"] = "↑↑↑Plot↑↑↑" - args["onclick_code_masterplot_expand"] = "expandCollapseDataPlots(this.parentNode.parentNode.parentNode.parentNode, '%(masterplot_layer_class)s', '%(plots_layer_class)s', '%(data_layer_class)s', '%(masterplot_expander_id)s', '%(collapse_message_masterplot)s', '%(expand_message_masterplot)s');" % args + args["onclick_code_mainplot_expand"] = "expandCollapseDataPlots(this.parentNode.parentNode.parentNode.parentNode, '%(mainplot_layer_class)s', '%(plots_layer_class)s', '%(data_layer_class)s', '%(mainplot_expander_id)s', '%(collapse_message_mainplot)s', '%(expand_message_mainplot)s');" % args args["collapse_message_moredata"] = "↑↑↑Collapse↑↑↑" args["expand_message_moredata"] = "↓↓↓Expand↓↓↓" - args["onclick_code_moredata_expand"] = "return 
diff --git a/modules/miscutil/lib/hepdatadisplayutils.py b/modules/miscutil/lib/hepdatadisplayutils.py
index d397907ea..1ee995b9c 100644
--- a/modules/miscutil/lib/hepdatadisplayutils.py
+++ b/modules/miscutil/lib/hepdatadisplayutils.py
@@ -614,21 +614,21 @@ def render_hepdata_dataset_html(dataset, recid, seq, display_link=True):
         "data_layer_class" : ("hepdata_data_%i" % (seq, )),
         "plots_layer_class" : ("hepdata_plots_%i" % (seq, )),
         "data_expander_id" : ("hepdata_expander_%i" % (seq, )),
-        "masterplot_layer_class" : ("hepdata_masterplot_layer_%i" % (seq,)),
-        "masterplot_expander_id" : ("hepdata_masterplot_expander_%i" % (seq,)),
+        "mainplot_layer_class" : ("hepdata_mainplot_layer_%i" % (seq,)),
+        "mainplot_expander_id" : ("hepdata_mainplot_expander_%i" % (seq,)),
         "plots_rowspan": len(dataset.data),
-        "masterplot_rowspan": len(dataset.data_qualifiers) + 3
+        "mainplot_rowspan": len(dataset.data_qualifiers) + 3
     }
 
-    args["collapse_message_masterplot"] = "↓↓↓Hide↓↓↓"
-    args["expand_message_masterplot"] = "↑↑↑Plot↑↑↑"
+    args["collapse_message_mainplot"] = "↓↓↓Hide↓↓↓"
+    args["expand_message_mainplot"] = "↑↑↑Plot↑↑↑"
 
-    args["onclick_code_masterplot_expand"] = "expandCollapseDataPlots(this.parentNode.parentNode.parentNode.parentNode, '%(masterplot_layer_class)s', '%(plots_layer_class)s', '%(data_layer_class)s', '%(masterplot_expander_id)s', '%(collapse_message_masterplot)s', '%(expand_message_masterplot)s');" % args
+    args["onclick_code_mainplot_expand"] = "expandCollapseDataPlots(this.parentNode.parentNode.parentNode.parentNode, '%(mainplot_layer_class)s', '%(plots_layer_class)s', '%(data_layer_class)s', '%(mainplot_expander_id)s', '%(collapse_message_mainplot)s', '%(expand_message_mainplot)s');" % args
 
     args["collapse_message_moredata"] = "↑↑↑Collapse↑↑↑"
     args["expand_message_moredata"] = "↓↓↓Expand↓↓↓"
 
-    args["onclick_code_moredata_expand"] = "return expandCollapseDataPlots(this.parentNode.parentNode.parentNode.parentNode, '%(data_layer_class)s','%(plots_layer_class)s', '%(masterplot_layer_class)s', '%(data_expander_id)s', '%(collapse_message_moredata)s', '%(expand_message_moredata)s');" % args
+    args["onclick_code_moredata_expand"] = "return expandCollapseDataPlots(this.parentNode.parentNode.parentNode.parentNode, '%(data_layer_class)s','%(plots_layer_class)s', '%(mainplot_layer_class)s', '%(data_expander_id)s', '%(collapse_message_moredata)s', '%(expand_message_moredata)s');" % args
 
     args["expander_colspan"] = dataset.num_columns + 2 # table_width + 2
@@ -673,13 +673,13 @@ def render_hepdata_dataset_html(dataset, recid, seq, display_link=True):
                 "colspan" : str(dataset.num_columns)
             })
 
-    c.append("""<td class="hepdata_masterplot_cell" rowspan="%(rowspan)s">""" \
+    c.append("""<td class="hepdata_mainplot_cell" rowspan="%(rowspan)s">""" \
             % {"rowspan" : len(dataset.data_qualifiers) + 3})
     if multiplot_url:
-        c.append("""<div id="%(masterplot_expander_id)s" onclick="%(onclick_code_masterplot_expand)s">
-                    <p>%(expand_message_masterplot)s</p>
-                    </div>""" \
+        c.append("""<div id="%(mainplot_expander_id)s" onclick="%(onclick_code_mainplot_expand)s">
+                    <p>%(expand_message_mainplot)s</p>
+                    </div>""" \
             % args)
     c.append("</td>")
-    c.append("<td class=\"%(masterplot_layer_class)s\" rowspan=\"%(masterplot_rowspan)s\">" % args)
+    c.append("<td class=\"%(mainplot_layer_class)s\" rowspan=\"%(mainplot_rowspan)s\">" % args)
     if multiplot_url:
         c.append("<img src=\"%(multiplot_url)s\" alt=\"The plot\" />" % args)
" % args) diff --git a/modules/miscutil/lib/hepdatautils.py b/modules/miscutil/lib/hepdatautils.py index 97399e639..3a6b9ec03 100644 --- a/modules/miscutil/lib/hepdatautils.py +++ b/modules/miscutil/lib/hepdatautils.py @@ -645,7 +645,7 @@ def generate_columns(self): for col_h in self.column_headers], [])), {}) # start with {} as initial record - def generate_qualifiers(self, master_recid): + def generate_qualifiers(self, main_recid): """Generate fields describing data qualifiers of a current dataset Returns a record containing only fields with necessary qualifiers """ @@ -660,7 +660,7 @@ def generate_qualifiers(self, master_recid): log_msg = ("""Data qualifier "%(dq)s" does not contain""" +\ """ colon. Record number: %(recid)s """) % { "dq" : dq["content"], - "recid" : str(master_recid) + "recid" : str(main_recid) } hepdata_log("harvesting", log_msg) dq_key = "" diff --git a/modules/miscutil/lib/inveniocfg_upgrader.py b/modules/miscutil/lib/inveniocfg_upgrader.py index f423178b5..d1655411c 100644 --- a/modules/miscutil/lib/inveniocfg_upgrader.py +++ b/modules/miscutil/lib/inveniocfg_upgrader.py @@ -41,7 +41,7 @@ "make check-upgrade" are being run). This means that an upgrade cannot assume anything about which version of Invenio is installed, and thus if the imported module is available or not. - * Once an upgrade have been committed to master/maint, no fiddling is allowed + * Once an upgrade have been committed to main/maint, no fiddling is allowed afterwards. If you want to correct a mistake, make an new upgrade instead. * All upgrades must depend on a previous upgrade (except for your first upgrade). diff --git a/modules/miscutil/lib/upgrades/invenio_2012_11_04_circulation_and_linkback_updates.py b/modules/miscutil/lib/upgrades/invenio_2012_11_04_circulation_and_linkback_updates.py index 9797035d5..83e91ab3e 100644 --- a/modules/miscutil/lib/upgrades/invenio_2012_11_04_circulation_and_linkback_updates.py +++ b/modules/miscutil/lib/upgrades/invenio_2012_11_04_circulation_and_linkback_updates.py @@ -29,8 +29,8 @@ def info(): def do_upgrade(): ## Since Invenio Upgrader was committed to maint-1.1 and merged to - ## master in 8d7ed84, some of the tables that were different in - ## maint-1.1 and master at the time needed upgrade recipe. This + ## main in 8d7ed84, some of the tables that were different in + ## maint-1.1 and main at the time needed upgrade recipe. This ## commit fixes the situation in gentle manner (by checking column ## existence etc), since some sites may have upgraded DB schema in ## various times. diff --git a/modules/miscutil/lib/upgrades/invenio_2012_11_21_aiduserinputlog_userid_check.py b/modules/miscutil/lib/upgrades/invenio_2012_11_21_aiduserinputlog_userid_check.py index ad8afef19..d0bcb49b3 100644 --- a/modules/miscutil/lib/upgrades/invenio_2012_11_21_aiduserinputlog_userid_check.py +++ b/modules/miscutil/lib/upgrades/invenio_2012_11_21_aiduserinputlog_userid_check.py @@ -26,7 +26,7 @@ def info(): def do_upgrade(): """ - Developers upgrading their existing master installations will likely be issued + Developers upgrading their existing main installations will likely be issued with many warnings from invenio_release_1_1_0 upgrade, due to being inbetween 1.0 and 1.1 on the upgrade path. Most warnings can safely be ignored except for one related to aidUSERINPUTLOG. 
diff --git a/modules/miscutil/lib/upgrades/invenio_2012_11_04_circulation_and_linkback_updates.py b/modules/miscutil/lib/upgrades/invenio_2012_11_04_circulation_and_linkback_updates.py
index 9797035d5..83e91ab3e 100644
--- a/modules/miscutil/lib/upgrades/invenio_2012_11_04_circulation_and_linkback_updates.py
+++ b/modules/miscutil/lib/upgrades/invenio_2012_11_04_circulation_and_linkback_updates.py
@@ -29,8 +29,8 @@ def info():
 
 def do_upgrade():
     ## Since Invenio Upgrader was committed to maint-1.1 and merged to
-    ## master in 8d7ed84, some of the tables that were different in
-    ## maint-1.1 and master at the time needed upgrade recipe. This
+    ## main in 8d7ed84, some of the tables that were different in
+    ## maint-1.1 and main at the time needed an upgrade recipe. This
     ## commit fixes the situation in a gentle manner (by checking column
     ## existence etc), since some sites may have upgraded the DB schema at
     ## various times.
diff --git a/modules/miscutil/lib/upgrades/invenio_2012_11_21_aiduserinputlog_userid_check.py b/modules/miscutil/lib/upgrades/invenio_2012_11_21_aiduserinputlog_userid_check.py
index ad8afef19..d0bcb49b3 100644
--- a/modules/miscutil/lib/upgrades/invenio_2012_11_21_aiduserinputlog_userid_check.py
+++ b/modules/miscutil/lib/upgrades/invenio_2012_11_21_aiduserinputlog_userid_check.py
@@ -26,7 +26,7 @@ def info():
 
 def do_upgrade():
     """
-    Developers upgrading their existing master installations will likely be issued
+    Developers upgrading their existing main installations will likely be issued
    with many warnings from the invenio_release_1_1_0 upgrade, due to being in between
    1.0 and 1.1 on the upgrade path. Most warnings can safely be ignored except
    for one related to aidUSERINPUTLOG. This upgrade implements an extra check of this
diff --git a/modules/miscutil/lib/upgrades/invenio_2013_01_12_bibrec_master_format.py b/modules/miscutil/lib/upgrades/invenio_2013_01_12_bibrec_master_format.py
index a04271082..595af9662 100644
--- a/modules/miscutil/lib/upgrades/invenio_2013_01_12_bibrec_master_format.py
+++ b/modules/miscutil/lib/upgrades/invenio_2013_01_12_bibrec_master_format.py
@@ -23,12 +23,12 @@
 depends_on = ['invenio_release_1_1_0']
 
 def info():
-    return "New bibrec.master_format column"
+    return "New bibrec.main_format column"
 
 def do_upgrade():
     create_statement = run_sql('SHOW CREATE TABLE bibrec')[0][1]
-    if '`master_format` varchar(16)' not in create_statement:
-        run_sql("ALTER TABLE bibrec ADD COLUMN master_format varchar(16) NOT NULL default 'marc'")
+    if '`main_format` varchar(16)' not in create_statement:
+        run_sql("ALTER TABLE bibrec ADD COLUMN main_format varchar(16) NOT NULL default 'marc'")
 
 try:
     import pyparsing
diff --git a/modules/miscutil/lib/upgrades/invenio_release_1_1_0.py b/modules/miscutil/lib/upgrades/invenio_release_1_1_0.py
index 432cf1f42..08c1bb917 100644
--- a/modules/miscutil/lib/upgrades/invenio_release_1_1_0.py
+++ b/modules/miscutil/lib/upgrades/invenio_release_1_1_0.py
@@ -53,7 +53,7 @@ def do_upgrade():
     """ Perform upgrade """
     tables = _get_tables()
     session_tbl = _get_table_info('session')
-    if (DB_VERSION == '1.0.0' or DB_VERSION == 'master') and \
+    if (DB_VERSION == '1.0.0' or DB_VERSION == 'main') and \
        'session_expiry' not in session_tbl['indexes']:
         _run_sql_ignore("ALTER TABLE session ADD KEY session_expiry " \
                         "(session_expiry)")
@@ -192,7 +192,7 @@ def do_upgrade():
   checksum char(32) NOT NULL,
   filesize bigint(15) unsigned NOT NULL,
   mime varchar(100) NOT NULL,
-  master_format varchar(50) NULL default NULL,
+  main_format varchar(50) NULL default NULL,
   PRIMARY KEY (id_bibdoc, version, format),
   KEY (last_version),
   KEY (format),
@@ -499,7 +499,7 @@ def pre_upgrade():
               "inveniocfg --upgrade can safely be ignored when upgrading from"
               " Invenio v0.99.1-0.99.x.")
 
-    if DB_VERSION == 'master':
+    if DB_VERSION == 'main':
         warnings.warn("Invenio database schema is on a development version"
                       " between 1.0.x and 1.1.0")
 
@@ -560,7 +560,7 @@ def _invenio_schema_version_guesser():
        are identical.
@return: One of the values pre-0.99.0, 0.99.0, 0.99.x, 0.99.x-1.0.0, 1.0.0, - 1.0.2, master, unknown + 1.0.2, main, unknown """ tables = [x[0] for x in run_sql("SHOW TABLES;")] @@ -643,7 +643,7 @@ def _invenio_schema_version_guesser(): if 'session_expiry' in tblinfo['indexes']: invenio_version['1.0.2'] += 1 - # '1.1.0/master' indicators + # '1.1.0/main' indicators if 'accMAILCOOKIE' in tables: tblinfo = _get_table_info('accMAILCOOKIE') if 'expiration' in tblinfo['indexes']: @@ -678,7 +678,7 @@ def _invenio_schema_version_guesser(): else: return '1.0.2' else: - return 'master' + return 'main' return 'unknown' diff --git a/modules/webaccess/lib/access_control_admin.py b/modules/webaccess/lib/access_control_admin.py index 4ba8e84d6..7f9916be9 100644 --- a/modules/webaccess/lib/access_control_admin.py +++ b/modules/webaccess/lib/access_control_admin.py @@ -249,10 +249,10 @@ def acc_is_role(name_action, **arguments): # first check if an action exists with this name id_action = acc_get_action_id(name_action) - arole = run_sql("SELECT id_accROLE FROM accROLE_accACTION_accARGUMENT WHERE id_accACTION=%s AND argumentlistid <= 0 LIMIT 1", (id_action, ), 1, run_on_slave=True) + arole = run_sql("SELECT id_accROLE FROM accROLE_accACTION_accARGUMENT WHERE id_accACTION=%s AND argumentlistid <= 0 LIMIT 1", (id_action, ), 1, run_on_subordinate=True) if arole: return True - other_roles_to_check = run_sql("SELECT id_accROLE, keyword, value, argumentlistid FROM accROLE_accACTION_accARGUMENT JOIN accARGUMENT ON id_accARGUMENT=id WHERE id_accACTION=%s AND argumentlistid > 0", (id_action, ), run_on_slave=True) + other_roles_to_check = run_sql("SELECT id_accROLE, keyword, value, argumentlistid FROM accROLE_accACTION_accARGUMENT JOIN accARGUMENT ON id_accARGUMENT=id WHERE id_accACTION=%s AND argumentlistid > 0", (id_action, ), run_on_subordinate=True) other_roles_to_check_dict = {} for id_accROLE, keyword, value, argumentlistid in other_roles_to_check: try: @@ -993,7 +993,7 @@ def acc_get_action_id(name_action): try: return run_sql("""SELECT id FROM accACTION WHERE name = %s""", - (name_action, ), run_on_slave=True)[0][0] + (name_action, ), run_on_subordinate=True)[0][0] except (ProgrammingError, IndexError): return 0 @@ -1099,7 +1099,7 @@ def acc_get_role_id(name_role): """get id of role, name given. 
""" try: return run_sql("""SELECT id FROM accROLE WHERE name = %s""", - (name_role, ), run_on_slave=True)[0][0] + (name_role, ), run_on_subordinate=True)[0][0] except IndexError: return 0 @@ -1214,7 +1214,7 @@ def acc_is_user_in_role(user_info, id_role): if run_sql("""SELECT ur.id_accROLE FROM user_accROLE ur WHERE ur.id_user = %s AND ur.expiration >= NOW() AND - ur.id_accROLE = %s LIMIT 1""", (user_info['uid'], id_role), 1, run_on_slave=True): + ur.id_accROLE = %s LIMIT 1""", (user_info['uid'], id_role), 1, run_on_subordinate=True): return True return acc_firerole_check_user(user_info, load_role_definition(id_role)) @@ -1229,10 +1229,10 @@ def acc_get_user_roles_from_user_info(user_info): roles = intbitset(run_sql("""SELECT ur.id_accROLE FROM user_accROLE ur WHERE ur.id_user = %s AND ur.expiration >= NOW() - ORDER BY ur.id_accROLE""", (uid, ), run_on_slave=True)) + ORDER BY ur.id_accROLE""", (uid, ), run_on_subordinate=True)) potential_implicit_roles = run_sql("""SELECT id, firerole_def_ser FROM accROLE - WHERE firerole_def_ser IS NOT NULL""", run_on_slave=True) + WHERE firerole_def_ser IS NOT NULL""", run_on_subordinate=True) for role_id, firerole_def_ser in potential_implicit_roles: if role_id not in roles: @@ -1247,7 +1247,7 @@ def acc_get_user_roles(id_user): explicit_roles = run_sql("""SELECT ur.id_accROLE FROM user_accROLE ur WHERE ur.id_user = %s AND ur.expiration >= NOW() - ORDER BY ur.id_accROLE""", (id_user, ), run_on_slave=True) + ORDER BY ur.id_accROLE""", (id_user, ), run_on_subordinate=True) return [id_role[0] for id_role in explicit_roles] @@ -1294,7 +1294,7 @@ def acc_find_user_role_actions(user_info): ur.id_accROLE = raa.id_accROLE AND raa.id_accACTION = a.id AND raa.id_accROLE = r.id """ - res1 = run_sql(query, (uid, ), run_on_slave=True) + res1 = run_sql(query, (uid, ), run_on_subordinate=True) res2 = [] for res in res1: @@ -1307,7 +1307,7 @@ def acc_find_user_role_actions(user_info): WHERE raa.id_accACTION = a.id AND raa.id_accROLE = r.id """ - res3 = run_sql(query, run_on_slave=True) + res3 = run_sql(query, run_on_subordinate=True) res4 = [] for role_name, action_name, role_definition in res3: if acc_firerole_check_user(user_info, @@ -1368,10 +1368,10 @@ def acc_find_possible_roles(name_action, always_add_superadmin=True, **arguments given arguments. 
    Returns the matching role ids as a list.
    """
    id_action = acc_get_action_id(name_action)
-    roles = intbitset(run_sql("SELECT id_accROLE FROM accROLE_accACTION_accARGUMENT WHERE id_accACTION=%s AND argumentlistid <= 0", (id_action, ), run_on_slave=True))
+    roles = intbitset(run_sql("SELECT id_accROLE FROM accROLE_accACTION_accARGUMENT WHERE id_accACTION=%s AND argumentlistid <= 0", (id_action, ), run_on_subordinate=True))
     if always_add_superadmin:
         roles.add(CFG_SUPERADMINROLE_ID)
-    other_roles_to_check = run_sql("SELECT id_accROLE, keyword, value, argumentlistid FROM accROLE_accACTION_accARGUMENT JOIN accARGUMENT ON id_accARGUMENT=id WHERE id_accACTION=%s AND argumentlistid > 0", (id_action, ), run_on_slave=True)
+    other_roles_to_check = run_sql("SELECT id_accROLE, keyword, value, argumentlistid FROM accROLE_accACTION_accARGUMENT JOIN accARGUMENT ON id_accARGUMENT=id WHERE id_accACTION=%s AND argumentlistid > 0", (id_action, ), run_on_subordinate=True)
     other_roles_to_check_dict = {}
     for id_accROLE, keyword, value, argumentlistid in other_roles_to_check:
         if id_accROLE not in roles:
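The replica-routed helpers combine as in this sketch (the role name and uid are
illustrative; a real user_info dict carries more fields, which the firerole
fallback may consult):

    from invenio.access_control_admin import acc_get_role_id, acc_is_user_in_role

    user_info = {'uid': 42}
    role_id = acc_get_role_id('curator')  # returns 0 for an unknown role
    if role_id and acc_is_user_in_role(user_info, role_id):
        # explicit membership found, or an implicit firerole match
        grant_access = True
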
diff --git a/modules/webaccess/lib/access_control_firerole.py b/modules/webaccess/lib/access_control_firerole.py
index 98a805cbb..a603c83f4 100644
--- a/modules/webaccess/lib/access_control_firerole.py
+++ b/modules/webaccess/lib/access_control_firerole.py
@@ -142,14 +142,14 @@ def load_role_definition(role_id):
 
     @param role_id: the id of the role whose definition should be loaded
     @return: a deserialized compiled role definition
     """
-    res = run_sql("SELECT firerole_def_ser FROM accROLE WHERE id=%s", (role_id, ), 1, run_on_slave=True)
+    res = run_sql("SELECT firerole_def_ser FROM accROLE WHERE id=%s", (role_id, ), 1, run_on_subordinate=True)
     if res:
         try:
             return deserialize(res[0][0])
         except Exception:
             ## Deserialization failed, e.g. after a Python upgrade:
             ## rebuild the serialized definitions and retry once.
             repair_role_definitions()
-            res = run_sql("SELECT firerole_def_ser FROM accROLE WHERE id=%s", (role_id, ), 1, run_on_slave=True)
+            res = run_sql("SELECT firerole_def_ser FROM accROLE WHERE id=%s", (role_id, ), 1, run_on_subordinate=True)
             if res:
                 return deserialize(res[0][0])
     return CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ
diff --git a/modules/webaccess/lib/access_control_mailcookie.py b/modules/webaccess/lib/access_control_mailcookie.py
index afb9072b4..563593203 100644
--- a/modules/webaccess/lib/access_control_mailcookie.py
+++ b/modules/webaccess/lib/access_control_mailcookie.py
@@ -108,7 +108,7 @@ def mail_cookie_retrieve_kind(cookie):
     try:
         password = cookie[:16]+cookie[-16:]
         cookie_id = int(cookie[16:-16], 16)
-        res = run_sql("SELECT kind FROM accMAILCOOKIE WHERE id=%s", (cookie_id, ), run_on_slave=True)
+        res = run_sql("SELECT kind FROM accMAILCOOKIE WHERE id=%s", (cookie_id, ), run_on_subordinate=True)
         if res:
             kind = res[0][0]
             assert(kind in _authorizations_kind)
@@ -126,7 +126,7 @@ def mail_cookie_check_common(cookie, delete=False):
         raise InvenioWebAccessMailCookieError, "Cookie not valid: %s" % e
     try:
         res = run_sql("SELECT kind, AES_DECRYPT(data,%s), onetime, status FROM accMAILCOOKIE WHERE "
-            "id=%s AND expiration>=NOW()", (password, cookie_id), run_on_slave=True)
+            "id=%s AND expiration>=NOW()", (password, cookie_id), run_on_subordinate=True)
         if not res:
             raise StandardError
     except StandardError:
diff --git a/modules/webaccess/lib/external_authentication_cern.py b/modules/webaccess/lib/external_authentication_cern.py
index 631aaad9d..bb10633fd 100644
--- a/modules/webaccess/lib/external_authentication_cern.py
+++ b/modules/webaccess/lib/external_authentication_cern.py
@@ -38,8 +38,8 @@
 CFG_EXTERNAL_AUTH_HIDDEN_GROUPS = (
     'All Exchange People',
     'CERN Users',
     'cern-computing-postmasters@cern.ch',
     'cern-nice2000-postmasters@cern.ch',
     'CMF FrontEnd Users',
     'CMF_NSC_259_NSU',
     'Domain Users',
diff --git a/modules/webaccess/lib/external_authentication_sso.py b/modules/webaccess/lib/external_authentication_sso.py
index 728576a5e..36b31e270 100644
--- a/modules/webaccess/lib/external_authentication_sso.py
+++ b/modules/webaccess/lib/external_authentication_sso.py
@@ -34,8 +34,8 @@
 CFG_EXTERNAL_AUTH_HIDDEN_GROUPS = (
     'All Exchange People',
     'CERN Users',
     'cern-computing-postmasters',
     'cern-nice2000-postmasters',
     'CMF FrontEnd Users',
     'CMF_NSC_259_NSU',
     'Domain Users',
diff --git a/modules/websearch/lib/search_engine.py b/modules/websearch/lib/search_engine.py
index 07a47cf3e..c1d58270d 100644
--- a/modules/websearch/lib/search_engine.py
+++ b/modules/websearch/lib/search_engine.py
@@ -229,7 +229,7 @@ def cache_filler():
             ret = []
             res = run_sql("""SELECT DISTINCT ar.value
                              FROM accROLE_accACTION_accARGUMENT raa JOIN accARGUMENT ar ON raa.id_accARGUMENT = ar.id
-                             WHERE ar.keyword = 'collection' AND raa.id_accACTION = %s""", (VIEWRESTRCOLL_ID,), run_on_slave=True)
+                             WHERE ar.keyword = 'collection' AND raa.id_accACTION = %s""", (VIEWRESTRCOLL_ID,), run_on_subordinate=True)
             for coll in res:
                 ret.append(coll[0])
             return ret
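The firerole fallback shown above composes with the explicit role checks; a
sketch under the same naming (the role id and user_info keys are illustrative):

    from invenio.access_control_firerole import (load_role_definition,
                                                 acc_firerole_check_user)

    # role_id as obtained e.g. from acc_get_role_id(); 42 is illustrative.
    definition = load_role_definition(42)  # deserialized, compiled definition
    user_info = {'uid': 1, 'email': 'user@example.org'}
    allowed = acc_firerole_check_user(user_info, definition)
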
diff --git a/modules/websearch/lib/websearch_webcoll.py b/modules/websearch/lib/websearch_webcoll.py
index b03a0a911..3ed19d47f 100644
--- a/modules/websearch/lib/websearch_webcoll.py
+++ b/modules/websearch/lib/websearch_webcoll.py
@@ -1028,7 +1028,7 @@ def get_database_last_updated_timestamp():
     database_tables_timestamps.append(get_table_update_time('field%'))
     database_tables_timestamps.append(get_table_update_time('format%'))
     database_tables_timestamps.append(get_table_update_time('rnkMETHODNAME'))
-    database_tables_timestamps.append(get_table_update_time('accROLE_accACTION_accARGUMENT', run_on_slave=True))
+    database_tables_timestamps.append(get_table_update_time('accROLE_accACTION_accARGUMENT', run_on_subordinate=True))
     return max(database_tables_timestamps)
 
 def get_cache_last_updated_timestamp():
diff --git a/modules/websession/lib/webuser.py b/modules/websession/lib/webuser.py
index 243068ccb..c028c359f 100644
--- a/modules/websession/lib/webuser.py
+++ b/modules/websession/lib/webuser.py
@@ -330,7 +330,7 @@ def get_uid_from_email(email):
         register_exception()
     return -1
 
-def isGuestUser(uid, run_on_slave=True):
+def isGuestUser(uid, run_on_subordinate=True):
     """Check whether the given user id corresponds to a guest user.
 
        isGuestUser(uid) -> boolean
     """
     out = 1
     try:
         res = run_sql("SELECT email FROM user WHERE id=%s LIMIT 1", (uid,), 1,
-                      run_on_slave=run_on_slave)
+                      run_on_subordinate=run_on_subordinate)
         if res:
             if res[0][0]:
                 out = 0
@@ -372,7 +372,7 @@ def isUserSuperAdmin(user_info):
                    FROM accROLE r LEFT JOIN user_accROLE ur
                    ON r.id = ur.id_accROLE
                    WHERE r.name = %s AND
-                   ur.id_user = %s AND ur.expiration>=NOW() LIMIT 1""", (SUPERADMINROLE, user_info['uid']), 1, run_on_slave=True):
+                   ur.id_user = %s AND ur.expiration>=NOW() LIMIT 1""", (SUPERADMINROLE, user_info['uid']), 1, run_on_subordinate=True):
         return True
     return acc_firerole_check_user(user_info,
         load_role_definition(acc_get_role_id(SUPERADMINROLE)))
@@ -1087,7 +1087,7 @@ def list_users_in_role(role):
                      FROM user_accROLE uacc JOIN accROLE acc
                      ON uacc.id_accROLE=acc.id
                      WHERE acc.name=%s""",
-                  (role,), run_on_slave=True)
+                  (role,), run_on_subordinate=True)
     if res:
         return map(lambda x: int(x[0]), res)
     return []
@@ -1111,7 +1111,7 @@ def list_users_in_roles(role_list):
         for role in role_list[:-1]:
             query_addons += "acc.name=%s OR "
         query_addons += "acc.name=%s"
-    res = run_sql(query + query_addons, query_params, run_on_slave=True)
+    res = run_sql(query + query_addons, query_params, run_on_subordinate=True)
     if res:
         return map(lambda x: int(x[0]), res)
     return []
diff --git a/modules/webstyle/lib/webinterface_handler.py b/modules/webstyle/lib/webinterface_handler.py
index 416863271..eaf50ad54 100644
--- a/modules/webstyle/lib/webinterface_handler.py
+++ b/modules/webstyle/lib/webinterface_handler.py
@@ -395,7 +395,7 @@ def _handler(req):
             else:
                 setUid(req=req, uid=uid)
 
-            guest_p = isGuestUser(getUid(req), run_on_slave=False)
+            guest_p = isGuestUser(getUid(req), run_on_subordinate=False)
 
             uri = req.uri
             if uri == '/':
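The handler above deliberately forces the guest check onto the primary
connection right after login, presumably to avoid replication lag; elsewhere
the default still permits the replica. A sketch of the call (the uid is
illustrative):

    from invenio.webuser import isGuestUser

    uid = 123  # illustrative user id
    if isGuestUser(uid):  # default run_on_subordinate=True may read the replica
        redirect_to_login = True  # e.g. send the visitor to the login page
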
diff --git a/modules/websubmit/lib/functions/Video_Processing.py b/modules/websubmit/lib/functions/Video_Processing.py
index a7f9dc0e2..6306608e2 100644
--- a/modules/websubmit/lib/functions/Video_Processing.py
+++ b/modules/websubmit/lib/functions/Video_Processing.py
@@ -70,8 +70,8 @@ def Video_Processing(parameters, curdir, form, user_info=None):
             fp = open(file_storing_name)
             filename = fp.read()
             fp.close()
-            batch_template['bibdoc_master_docname'] = os.path.splitext(os.path.split(filename)[1])[0]
-            batch_template['bibdoc_master_extension'] = os.path.splitext(filename)[1]
+            batch_template['bibdoc_main_docname'] = os.path.splitext(os.path.split(filename)[1])[0]
+            batch_template['bibdoc_main_extension'] = os.path.splitext(filename)[1]
             batch_template['submission_filename'] = filename
         except:
             register_exception(prefix="The file containing the original filename of the video was not readable")
diff --git a/modules/websubmit/lib/websubmitadmin_engine.py b/modules/websubmit/lib/websubmitadmin_engine.py
index ab65f65ad..b47b0a72c 100644
--- a/modules/websubmit/lib/websubmitadmin_engine.py
+++ b/modules/websubmit/lib/websubmitadmin_engine.py
@@ -517,7 +517,7 @@ def perform_request_organise_submission_page(doctype="",
                 user_msg.append("Submission-collection moved downwards")
 
         else:
-            ## cannot move the master (0) collection
+            ## cannot move the main (0) collection
             user_msg.append("Unable to move submission-collection downwards")
 
         (title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
     elif "" not in (movesbmcollectionup, sbmcolid):
@@ -557,7 +557,7 @@ def perform_request_organise_submission_page(doctype="",
                                              score_brother_above)
                 user_msg.append("Submission-collection moved upwards")
 
         else:
-            ## cannot move the master (0) collection
+            ## cannot move the main (0) collection
             user_msg.append("Unable to move submission-collection upwards")
 
         (title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
     elif "" not in (addsbmcollection, addtosbmcollection):
diff --git a/modules/websubmit/lib/websubmitadmincli.py b/modules/websubmit/lib/websubmitadmincli.py
index 5799d615a..d57af431f 100644
--- a/modules/websubmit/lib/websubmitadmincli.py
+++ b/modules/websubmit/lib/websubmitadmincli.py
@@ -52,9 +52,9 @@
 Diff a submission with a given dump:
 Eg: websubmitadmin --diff=DEMOART < DEMOART_db_dump.sql
 
-Diff between latest version in 'master' branch of your Git repo, with
+Diff between the latest version in the 'main' branch of your Git repo and the
 version in the database:
-Eg: git show master:websubmit/DEMOART_db_dump.sql | ../websubmitadmin --diff=DEMOART | less -S
+Eg: git show main:websubmit/DEMOART_db_dump.sql | ../websubmitadmin --diff=DEMOART | less -S
 
 Diff between the CVS version and the submission in the database, ignoring dates
 and the ordering of submission fields on the page: