diff --git a/Pipfile b/Pipfile index 05da418..bf84b1d 100644 --- a/Pipfile +++ b/Pipfile @@ -13,7 +13,6 @@ clint = "==0.5.1" coverage = "==4.5.4" "ruamel.yaml" = "==0.16.10" yamale = "==3.0.4" -PyYaml = "*" [requires] python_version = "3.7" diff --git a/Pipfile.lock b/Pipfile.lock index 46e3541..9de47b5 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -70,30 +70,38 @@ }, "pyyaml": { "hashes": [ - "sha256:02c78d77281d8f8d07a255e57abdbf43b02257f59f50cc6b636937d68efa5dd0", - "sha256:0dc9f2eb2e3c97640928dec63fd8dc1dd91e6b6ed236bd5ac00332b99b5c2ff9", - "sha256:124fd7c7bc1e95b1eafc60825f2daf67c73ce7b33f1194731240d24b0d1bf628", - "sha256:26fcb33776857f4072601502d93e1a619f166c9c00befb52826e7b774efaa9db", - "sha256:31ba07c54ef4a897758563e3a0fcc60077698df10180abe4b8165d9895c00ebf", - "sha256:3c49e39ac034fd64fd576d63bb4db53cda89b362768a67f07749d55f128ac18a", - "sha256:52bf0930903818e600ae6c2901f748bc4869c0c406056f679ab9614e5d21a166", - "sha256:5a3f345acff76cad4aa9cb171ee76c590f37394186325d53d1aa25318b0d4a09", - "sha256:5e7ac4e0e79a53451dc2814f6876c2fa6f71452de1498bbe29c0b54b69a986f4", - "sha256:7242790ab6c20316b8e7bb545be48d7ed36e26bbe279fd56f2c4a12510e60b4b", - "sha256:737bd70e454a284d456aa1fa71a0b429dd527bcbf52c5c33f7c8eee81ac16b89", - "sha256:8635d53223b1f561b081ff4adecb828fd484b8efffe542edcfdff471997f7c39", - "sha256:8b818b6c5a920cbe4203b5a6b14256f0e5244338244560da89b7b0f1313ea4b6", - "sha256:8bf38641b4713d77da19e91f8b5296b832e4db87338d6aeffe422d42f1ca896d", - "sha256:a36a48a51e5471513a5aea920cdad84cbd56d70a5057cca3499a637496ea379c", - "sha256:b2243dd033fd02c01212ad5c601dafb44fbb293065f430b0d3dbf03f3254d615", - "sha256:cc547d3ead3754712223abb7b403f0a184e4c3eae18c9bb7fd15adef1597cc4b", - "sha256:cc552b6434b90d9dbed6a4f13339625dc466fd82597119897e9489c953acbc22", - "sha256:f3790156c606299ff499ec44db422f66f05a7363b39eb9d5b064f17bd7d7c47b", - "sha256:f7a21e3d99aa3095ef0553e7ceba36fb693998fbb1226f1392ce33681047465f", - 
"sha256:fdc6b2cb4b19e431994f25a9160695cc59a4e861710cc6fc97161c5e845fc579" - ], - "index": "Brain Brew", - "version": "==5.4" + "sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf", + "sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696", + "sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393", + "sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77", + "sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922", + "sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5", + "sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8", + "sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10", + "sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc", + "sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018", + "sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e", + "sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253", + "sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347", + "sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183", + "sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541", + "sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb", + "sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185", + "sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc", + "sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db", + "sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa", + "sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46", + "sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122", + "sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b", + "sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63", + 
"sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df", + "sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc", + "sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247", + "sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6", + "sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", + "version": "==5.4.1" }, "ruamel.yaml": { "hashes": [ @@ -137,7 +145,7 @@ "sha256:e9f7d1d8c26a6a12c23421061f9022bb62704e38211fe375c645485f38df34a2", "sha256:f6061a31880c1ed6b6ce341215336e2f3d0c1deccd84957b6fa8ca474b41e89f" ], - "markers": "platform_python_implementation == 'CPython' and python_version < '3.9'", + "markers": "python_version < '3.9' and platform_python_implementation == 'CPython'", "version": "==0.2.2" }, "yamale": { @@ -154,6 +162,7 @@ "sha256:31b2eced602aa8423c2aea9c76a724617ed67cf9513173fd3a4f03e3a929c7e6", "sha256:832aa3cde19744e49938b91fea06d69ecb9e649c93ba974535d08ad92164f700" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.3.0" }, "bleach": { @@ -161,6 +170,7 @@ "sha256:6123ddc1052673e52bab52cdc955bcb57a015264a1c57d37bea2f6b817af0125", "sha256:98b3170739e5e83dd9dc19633f074727ad848cbedb6026708c8ac2d3b697a433" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==3.3.0" }, "certifi": { @@ -217,6 +227,7 @@ "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa", "sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==4.0.0" }, "colorama": { @@ -224,6 +235,7 @@ "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b", "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2" ], + 
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==0.4.4" }, "cryptography": { @@ -245,16 +257,18 @@ }, "docutils": { "hashes": [ - "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af", - "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc" + "sha256:a71042bb7207c03d5647f280427f14bfbd1a65c9eb84f4b341d85fafb6bb4bdf", + "sha256:e2ffeea817964356ba4470efba7c2f42b6b0de0b04e66378507e3e2504bbff4c" ], - "version": "==0.16" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==0.17" }, "idna": { "hashes": [ "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6", "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.10" }, "importlib-metadata": { @@ -285,6 +299,7 @@ "sha256:5652a9ac72209ed7df8d9c15daf4e1aa0e3d2ccd3c87f8265a0673cd9cbc9ced", "sha256:c5d6da9ca3ff65220c3bfd2a8db06d698f05d4d2b9be57e1deb2be5a45019713" ], + "markers": "python_version >= '3.5'", "version": "==8.7.0" }, "packaging": { @@ -292,6 +307,7 @@ "sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5", "sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.9" }, "pkginfo": { @@ -306,6 +322,7 @@ "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0", "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.13.1" }, "py": { @@ -313,6 +330,7 @@ "sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3", "sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a" ], + "markers": "python_version >= '2.7' and 
python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.10.0" }, "pycparser": { @@ -320,6 +338,7 @@ "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0", "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.20" }, "pygments": { @@ -334,6 +353,7 @@ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.7" }, "pytest": { @@ -356,6 +376,7 @@ "sha256:27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804", "sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==2.25.1" }, "requests-toolbelt": { @@ -385,6 +406,7 @@ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.15.0" }, "tqdm": { @@ -396,20 +418,11 @@ }, "twine": { "hashes": [ - "sha256:2f6942ec2a17417e19d2dd372fc4faa424c87ee9ce49b4e20c427eb00a0f3f41", - "sha256:fcffa8fc37e8083a5be0728371f299598870ee1eccc94e9a25cef7b1dcfa8297" + "sha256:16f706f2f1687d7ce30e7effceee40ed0a09b7c33b9abb5ef6434e5551565d83", + "sha256:a56c985264b991dc8a8f4234eb80c5af87fa8080d0c224ad8f2cd05a2c22e83b" ], "index": "Brain Brew", - "version": "==3.3.0" - }, - "typing-extensions": { - "hashes": [ - "sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918", - "sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c", - "sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f" - ], - "markers": "python_version < 
'3.8'", - "version": "==3.7.4.3" + "version": "==3.4.1" }, "urllib3": { "hashes": [ diff --git a/README.md b/README.md index 07ca7fa..8849a04 100644 --- a/README.md +++ b/README.md @@ -7,9 +7,74 @@ Brain Brew is an open-source flashcard manipulation tool designed to allow users The goal is to facilitate collaboration and maximize user choice, with a powerful tool that minimizes effort. [CrowdAnki](https://github.com/Stvad/CrowdAnki) Exports and Csv(s) are the only supported file types as of now, but there will be more to come. -:exclamation: See the [Brain Brew Starter Project](https://github.com/ohare93/brain-brew-starter) for a working clone-able Git repo. -Or you can install the latest version of [Brain Brew on PyPi.org](https://pypi.org/project/Brain-Brew/). +[Anki Ultimate Geography](https://github.com/axelboc/anki-ultimate-geography/) is currently the best working example of a Flashcard repo using Brain Brew :tada: +See there for inspiration! + + +# Installation + + +Install the latest version of [Brain Brew on PyPi.org](https://pypi.org/project/Brain-Brew/) +with `pip install brain-brew`. Virtual environment using `pipenv` is recommended! + +:exclamation: See the [Brain Brew Starter Project][BrainBrewStarter]for a working clone-able Git repo. +From this repo you can now create a functional Brain Brew setup automatically, +with your own flashcards! Simply by running + +```bash +brainbrew init [Your CrowdAnki Export Folder] +``` + +This will generate the entire working repo for you, including the recipe files, source files, and build folder. +For bi-directional sync: Anki <-> Source! + +See [the starter repo][BrainBrewStarter] for a step-by-step guide for all of this. + +# Usage + +Brain Brew runs from the command line and takes a *Recipe.yaml* file to run. + +```bash +brainbrew run source_to_anki.yaml +``` + +Full usage help text: +```bash +Brain Brew vx.y.z +usage: brainbrew [-h] {run,init} ... + +Manage Flashcards by transforming them to various types. 
+ +positional arguments: + {run,init} Commands that can be run + run Run a recipe file. This will convert some data to another format, based on the instructions in the recipe file. + init Initialise a Brain Brew repository, using a CrowdAnki export as the base data. + +optional arguments: + -h, --help show this help message and exit +``` + + +## Recipes + +These are the instructions for how Brain Brew will ~~build~~ *brew* your data into another format. + +What's YAML? See the current spec [here](http://www.yaml.org/spec/1.2/spec.html). + +Run a recipe with `--verify` or `-v` to confirm your recipe is valid, without actually running it. +A dry run of sorts. + +### Tasks + +A recipe is made of many individual tasks, which do specific functions. +Full detailed list coming soon™️, but see the [Yamale recipe schema](https://github.com/ohare93/brain-brew/blob/master/brain_brew/schemas/recipe.yaml) +(local file: `brain_brew/schemas/recipe.yaml`) in the meantime :+1: + + + + +[//]: <> (Yamale) # The Why @@ -27,7 +92,7 @@ You can see any changes that occur, go back in time should an mistake be discove However the current tools for managing Anki cards in source control (such as [Anki-DM](https://github.com/OnkelTem/anki-dm), [GenAnki](https://github.com/kerrickstaley/genanki), -and [Remote Decks](https://github.com/c-okelly/anki-remote-decks)) are only one way direction. +and [Remote Decks](https://github.com/c-okelly/anki-remote-decks)) are only one way. You generate cards from a csv into a file that can *only be imported* into Anki. There is no way to export them back, meaning a user must manually copy their changes over, or simple not edit their cards anywhere other than in source control. 
@@ -67,28 +132,43 @@ Reusable subconfig files allow for minor changes without breaking the DRY princi --> ```Yaml -# Build tasks to run -tasks: - # Convert a collection of csvs into Deck Parts - - csv_collection_to_parts: - notes: words.json - - # List of Note Models - # How each Note Model used is built up from the csv data +- generate_guids_in_csv: + source: src/data/words.csv + columns: [ guid ] + +- build_parts: + - note_model_from_yaml_part: + part_id: LL Word + file: src/note_models/LL Word.yaml + + - headers_from_yaml_part: + part_id: default header + file: src/headers/default.yaml + override: # Optional + deck_description_html_file: src/headers/desc.html + + - media_group_from_folder: + part_id: all_media + source: src/media + recursive: true # Optional + + - notes_from_csvs: + part_id: english-to-danish + note_model_mappings: - - note_model: LL Word - csv_columns_to_fields: # Map of csv column to note model field + - note_models: + - LL Word + columns_to_fields: # Optional guid: guid tags: tags english: English danish: Word - danish audio: Pronunciation (Recording and/or IPA) picture: Picture - - # List of csvs to use - csv_file_mappings: - - csv: csvs/words.csv + danish audio: Pronunciation (Recording and/or IPA) + + file_mappings: + - file: src/data/words.csv note_model: LL Word sort_by_columns: [english] # Optional reverse_sort: no # Optional @@ -133,15 +213,20 @@ The two following csv files contain information about England, but split into di Brain Brew can be told that `data-capital` is a derivative of `data-main` in the build config file as such: ```yaml -- csv: src/data/data-main.csv # <---- Main +- file: src/data/data-main.csv # <---- Main note_model: Ultimate Geography derivatives: - - csv: src/data/data-country.csv - - csv: src/data/data-country-info.csv - - csv: src/data/data-capital.csv # <---- Capital - - csv: src/data/data-capital-info.csv - - csv: src/data/data-capital-hint.csv - - csv: src/data/data-flag-similarity.csv + - file: 
src/data/data-country.csv + - file: src/data/data-country-info.csv + - file: src/data/data-capital.csv # <---- Capital + - file: src/data/data-capital-info.csv + - file: src/data/data-capital-hint.csv + # note_model: different_note_model + # derivatives: + # - file: derivative-of-a-derivative.csv + # derivatives: + # - file: infinite-nesting.csv + - file: src/data/data-flag-similarity.csv ``` When run Brain Brew will perform the following steps for each derivative: @@ -162,36 +247,8 @@ When run Brain Brew will perform the following steps for each derivative: 1. **Derivatives can be given a Note Model**, which overrides their parent's note model for all the matched rows. -See the [Brain Brew Starter Project](https://github.com/ohare93/brain-brew-starter) for an example of Csv Derivatives working. - - - -# This allows for -* Collaboration with many people -* Freedom of tool choice - * Use any currently existing Anki add-on to edit or update your cards -* User choice of file type - - - -# Limitations -* Note Models cannot yet be generated, and are very ugly and hard to manage -* Deck headers are terrible, and I hope to remove them entirely as a necessary Deck Part by making a PR in CrowdAnki - - -# Planned Work - -#### Personal Fields -Full support for Personal Fields included in CrowdAnki. Including the ability to set a default value for new cards. - -#### More Source Types -* Markdown -* Yaml -* Google Sheets +See the [Brain Brew Starter Project][BrainBrewStarter] for an example of Csv Derivatives working. -#### Note Model Generation / Syncing -Two way Note Model building, so that users can change the Note Model somewhere other than Anki. -#### Deck Header Removal -Deck Headers should not be necessary for an import into CrowdAnki (or Anki itself). I hope to remove the need for them entirely. 
+[BrainBrewStarter]: https://github.com/ohare93/brain-brew-starter \ No newline at end of file diff --git a/brain_brew/build_tasks/crowd_anki/crowd_anki_generate.py b/brain_brew/build_tasks/crowd_anki/crowd_anki_generate.py index 0f3d2ab..b9d99e2 100644 --- a/brain_brew/build_tasks/crowd_anki/crowd_anki_generate.py +++ b/brain_brew/build_tasks/crowd_anki/crowd_anki_generate.py @@ -5,7 +5,7 @@ from brain_brew.build_tasks.crowd_anki.media_group_to_crowd_anki import MediaGroupToCrowdAnki from brain_brew.build_tasks.crowd_anki.note_models_to_crowd_anki import NoteModelsToCrowdAnki from brain_brew.build_tasks.crowd_anki.notes_to_crowd_anki import NotesToCrowdAnki -from brain_brew.configuration.build_config.build_task import TopLevelBuildTask +from brain_brew.commands.run_recipe.build_task import TopLevelBuildTask from brain_brew.configuration.representation_base import RepresentationBase from brain_brew.representation.generic.media_file import MediaFile from brain_brew.representation.json.crowd_anki_export import CrowdAnkiExport @@ -44,6 +44,7 @@ class Representation(RepresentationBase): def from_repr(cls, data: Union[Representation, dict]): rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) return cls( + rep=rep, crowd_anki_export=CrowdAnkiExport.create_or_get(rep.folder), notes_transform=NotesToCrowdAnki.from_repr(rep.notes), note_model_transform=NoteModelsToCrowdAnki.from_repr(rep.note_models), @@ -51,6 +52,7 @@ def from_repr(cls, data: Union[Representation, dict]): media_transform=MediaGroupToCrowdAnki.from_repr(rep.media) if rep.media else None ) + rep: Representation crowd_anki_export: CrowdAnkiExport notes_transform: NotesToCrowdAnki note_model_transform: NoteModelsToCrowdAnki diff --git a/brain_brew/build_tasks/crowd_anki/headers_from_crowdanki.py b/brain_brew/build_tasks/crowd_anki/headers_from_crowdanki.py index 9cbc417..5e32a01 100644 --- a/brain_brew/build_tasks/crowd_anki/headers_from_crowdanki.py 
+++ b/brain_brew/build_tasks/crowd_anki/headers_from_crowdanki.py @@ -1,7 +1,7 @@ from dataclasses import dataclass, field from typing import Union, Optional -from brain_brew.configuration.build_config.build_task import BuildPartTask +from brain_brew.commands.run_recipe.build_task import BuildPartTask from brain_brew.configuration.part_holder import PartHolder from brain_brew.configuration.representation_base import RepresentationBase from brain_brew.representation.json.crowd_anki_export import CrowdAnkiExport @@ -30,28 +30,30 @@ def task_regex(cls) -> str: @classmethod def yamale_schema(cls) -> str: return f'''\ - source: str() part_id: str() + source: str() save_to_file: str(required=False) ''' @dataclass class Representation(RepresentationBase): - source: str part_id: str + source: str save_to_file: Optional[str] = field(default=None) @classmethod def from_repr(cls, data: Union[Representation, dict]): rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) return cls( + rep=rep, ca_export=CrowdAnkiExport.create_or_get(rep.source), part_id=rep.part_id, save_to_file=rep.save_to_file ) - ca_export: CrowdAnkiExport + rep: Representation part_id: str + ca_export: CrowdAnkiExport save_to_file: Optional[str] def execute(self): @@ -59,7 +61,7 @@ def execute(self): headers = Headers(self.crowd_anki_to_headers(ca_wrapper.data)) - PartHolder.override_or_create(self.part_id, self.save_to_file, headers) + return PartHolder.override_or_create(self.part_id, self.save_to_file, headers) @staticmethod def crowd_anki_to_headers(ca_data: dict): diff --git a/brain_brew/build_tasks/crowd_anki/headers_to_crowd_anki.py b/brain_brew/build_tasks/crowd_anki/headers_to_crowd_anki.py index cfec757..9a85a7e 100644 --- a/brain_brew/build_tasks/crowd_anki/headers_to_crowd_anki.py +++ b/brain_brew/build_tasks/crowd_anki/headers_to_crowd_anki.py @@ -1,4 +1,4 @@ -from dataclasses import dataclass, field +from dataclasses import dataclass from 
typing import Union from brain_brew.build_tasks.crowd_anki.headers_from_crowdanki import headers_default_values @@ -24,9 +24,11 @@ def from_repr(cls, data: Union[Representation, dict, str]): rep = cls.Representation(part_id=data) # Support single string being passed in return cls( + rep=rep, headers=PartHolder.from_file_manager(rep.part_id).part ) + rep: Representation headers: Headers def execute(self) -> dict: diff --git a/brain_brew/build_tasks/crowd_anki/media_group_from_crowd_anki.py b/brain_brew/build_tasks/crowd_anki/media_group_from_crowd_anki.py index 1d045a2..29cf421 100644 --- a/brain_brew/build_tasks/crowd_anki/media_group_from_crowd_anki.py +++ b/brain_brew/build_tasks/crowd_anki/media_group_from_crowd_anki.py @@ -5,7 +5,7 @@ from brain_brew.configuration.part_holder import PartHolder from brain_brew.representation.json.crowd_anki_export import CrowdAnkiExport from brain_brew.representation.yaml.media_group import MediaGroup -from brain_brew.transformers.media_group_from_location import create_media_group_from_location +from brain_brew.transformers.create_media_group_from_location import create_media_group_from_location @dataclass @@ -20,6 +20,7 @@ def from_repr(cls, data: Union[MediaGroupFromFolder.Representation, dict]): cae: CrowdAnkiExport = CrowdAnkiExport.create_or_get(rep.source) return cls( + rep=rep, part=create_media_group_from_location( part_id=rep.part_id, save_to_file=rep.save_to_file, @@ -31,6 +32,7 @@ def from_repr(cls, data: Union[MediaGroupFromFolder.Representation, dict]): ) ) + rep: MediaGroupFromFolder.Representation part: MediaGroup def execute(self): diff --git a/brain_brew/build_tasks/crowd_anki/media_group_to_crowd_anki.py b/brain_brew/build_tasks/crowd_anki/media_group_to_crowd_anki.py index d1e294a..705d3a0 100644 --- a/brain_brew/build_tasks/crowd_anki/media_group_to_crowd_anki.py +++ b/brain_brew/build_tasks/crowd_anki/media_group_to_crowd_anki.py @@ -6,7 +6,7 @@ from brain_brew.interfaces.yamale_verifyable import YamlRepr 
from brain_brew.representation.generic.media_file import MediaFile from brain_brew.representation.yaml.media_group import MediaGroup -from brain_brew.transformers.media_group_save_to_location import save_media_groups_to_location +from brain_brew.transformers.save_media_group_to_location import save_media_groups_to_location @dataclass @@ -29,9 +29,11 @@ class Representation(RepresentationBase): def from_repr(cls, data: Union[Representation, dict]): rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) return cls( + rep=rep, parts=list(holder.part for holder in map(PartHolder.from_file_manager, rep.parts)) ) + rep: Representation parts: List[MediaGroup] def execute(self, ca_media_folder: str) -> Set[MediaFile]: diff --git a/brain_brew/build_tasks/crowd_anki/note_models_from_crowd_anki.py b/brain_brew/build_tasks/crowd_anki/note_model_single_from_crowd_anki.py similarity index 85% rename from brain_brew/build_tasks/crowd_anki/note_models_from_crowd_anki.py rename to brain_brew/build_tasks/crowd_anki/note_model_single_from_crowd_anki.py index a25048b..583ad5f 100644 --- a/brain_brew/build_tasks/crowd_anki/note_models_from_crowd_anki.py +++ b/brain_brew/build_tasks/crowd_anki/note_model_single_from_crowd_anki.py @@ -1,7 +1,7 @@ from dataclasses import dataclass, field from typing import Optional, Union -from brain_brew.configuration.build_config.build_task import BuildPartTask +from brain_brew.commands.run_recipe.build_task import BuildPartTask from brain_brew.configuration.part_holder import PartHolder from brain_brew.configuration.representation_base import RepresentationBase from brain_brew.representation.json.crowd_anki_export import CrowdAnkiExport @@ -10,28 +10,24 @@ @dataclass -class NoteModelsFromCrowdAnki(BuildPartTask): +class NoteModelSingleFromCrowdAnki(BuildPartTask): @classmethod def task_name(cls) -> str: - return r'note_models_from_crowd_anki' - - @classmethod - def task_regex(cls) -> str: - return 
r'note_model[s]?_from_crowd_anki' + return r'note_model_from_crowd_anki' @classmethod def yamale_schema(cls) -> str: return f'''\ - source: str() part_id: str() + source: str() model_name: str(required=False) save_to_file: str(required=False) ''' @dataclass class Representation(RepresentationBase): - source: str part_id: str + source: str model_name: Optional[str] = field(default=None) save_to_file: Optional[str] = field(default=None) # TODO: fields: Optional[List[str]] @@ -41,15 +37,17 @@ class Representation(RepresentationBase): def from_repr(cls, data: Union[Representation, dict]): rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) return cls( + rep=rep, ca_export=CrowdAnkiExport.create_or_get(rep.source), part_id=rep.part_id, model_name=rep.model_name or rep.part_id, save_to_file=rep.save_to_file ) + rep: Representation + part_id: str ca_export: CrowdAnkiExport model_name: str - part_id: str save_to_file: Optional[str] def execute(self): @@ -61,4 +59,4 @@ def execute(self): raise ReferenceError(f"Missing Note Model '{self.model_name}' in CrowdAnki file") part = NoteModel.from_crowdanki(note_models_dict[self.model_name]) - PartHolder.override_or_create(self.part_id, self.save_to_file, part) + return PartHolder.override_or_create(self.part_id, self.save_to_file, part) diff --git a/brain_brew/build_tasks/crowd_anki/note_models_all_from_crowd_anki.py b/brain_brew/build_tasks/crowd_anki/note_models_all_from_crowd_anki.py new file mode 100644 index 0000000..17e2950 --- /dev/null +++ b/brain_brew/build_tasks/crowd_anki/note_models_all_from_crowd_anki.py @@ -0,0 +1,52 @@ +import logging +from dataclasses import dataclass, field +from typing import Optional, Union, List + +from brain_brew.commands.run_recipe.build_task import BuildPartTask +from brain_brew.configuration.part_holder import PartHolder +from brain_brew.configuration.representation_base import RepresentationBase +from 
brain_brew.representation.json.crowd_anki_export import CrowdAnkiExport +from brain_brew.representation.json.wrappers_for_crowd_anki import CrowdAnkiJsonWrapper +from brain_brew.representation.yaml.note_model import NoteModel + + +@dataclass +class NoteModelsAllFromCrowdAnki(BuildPartTask): + @classmethod + def task_name(cls) -> str: + return r'note_models_all_from_crowd_anki' + + @classmethod + def yamale_schema(cls) -> str: + return f'''\ + source: str() + ''' + + @dataclass + class Representation(RepresentationBase): + source: str + + @classmethod + def from_repr(cls, data: Union[Representation, dict]): + rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) + return cls( + rep=rep, + ca_export=CrowdAnkiExport.create_or_get(rep.source) + ) + + rep: Representation + ca_export: CrowdAnkiExport + + def execute(self) -> List[PartHolder[NoteModel]]: + ca_wrapper: CrowdAnkiJsonWrapper = self.ca_export.json_data + + note_models_dict = {model.get('name'): model for model in ca_wrapper.note_models} + + parts = [] + for name, model in note_models_dict.items(): + parts.append(PartHolder.override_or_create(name, None, NoteModel.from_crowdanki(model))) + + logging.info(f"Found {len(parts)} note model{'s' if len(parts) > 1 else ''} in CrowdAnki Export: '" + + "', '".join(note_models_dict.keys()) + "'") + + return parts diff --git a/brain_brew/build_tasks/crowd_anki/note_models_to_crowd_anki.py b/brain_brew/build_tasks/crowd_anki/note_models_to_crowd_anki.py index 2e278ca..a08e613 100644 --- a/brain_brew/build_tasks/crowd_anki/note_models_to_crowd_anki.py +++ b/brain_brew/build_tasks/crowd_anki/note_models_to_crowd_anki.py @@ -52,6 +52,7 @@ def from_repr(cls, data: Union[Representation, dict, str]): rep = cls.Representation(part_id=data) # Support string return cls( + rep=rep, part_to_read=rep.part_id ) @@ -59,6 +60,7 @@ def get_note_model(self) -> PartHolder[NoteModel]: self.part = 
PartHolder.from_file_manager(self.part_to_read) return self.part # Todo: add filters in here + rep: Representation part: PartHolder[NoteModel] = field(init=False) part_to_read: str @@ -78,9 +80,11 @@ def from_repr(cls, data: Union[Representation, dict, List[str]]): note_model_items = list(map(cls.NoteModelListItem.from_repr, rep.parts)) return cls( + rep=rep, note_models=[nm.get_note_model() for nm in note_model_items] ) + rep: Representation note_models: List[PartHolder[NoteModel]] def execute(self) -> List[dict]: diff --git a/brain_brew/build_tasks/crowd_anki/notes_from_crowd_anki.py b/brain_brew/build_tasks/crowd_anki/notes_from_crowd_anki.py index 7430826..d38fc27 100644 --- a/brain_brew/build_tasks/crowd_anki/notes_from_crowd_anki.py +++ b/brain_brew/build_tasks/crowd_anki/notes_from_crowd_anki.py @@ -3,7 +3,7 @@ from typing import Union, Optional, List from brain_brew.build_tasks.crowd_anki.shared_base_notes import SharedBaseNotes -from brain_brew.configuration.build_config.build_task import BuildPartTask +from brain_brew.commands.run_recipe.build_task import BuildPartTask from brain_brew.configuration.part_holder import PartHolder from brain_brew.configuration.representation_base import RepresentationBase from brain_brew.representation.json.crowd_anki_export import CrowdAnkiExport @@ -20,8 +20,8 @@ def task_name(cls) -> str: @classmethod def yamale_schema(cls) -> str: return f'''\ - source: str() part_id: str() + source: str() sort_order: list(str(), required=False) save_to_file: str(required=False) reverse_sort: str(required=False) @@ -29,8 +29,8 @@ def yamale_schema(cls) -> str: @dataclass class Representation(RepresentationBase): - source: str part_id: str + source: str sort_order: Optional[List[str]] = field(default_factory=lambda: None) reverse_sort: Optional[bool] = field(default_factory=lambda: None) save_to_file: Optional[str] = field(default=None) @@ -39,6 +39,7 @@ class Representation(RepresentationBase): def from_repr(cls, data: 
Union[Representation, dict]): rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) return cls( + rep=rep, ca_export=CrowdAnkiExport.create_or_get(rep.source), part_id=rep.part_id, sort_order=SharedBaseNotes._get_sort_order(rep.sort_order), @@ -46,22 +47,26 @@ def from_repr(cls, data: Union[Representation, dict]): save_to_file=rep.save_to_file ) - ca_export: CrowdAnkiExport + rep: Representation part_id: str + ca_export: CrowdAnkiExport sort_order: Optional[List[str]] reverse_sort: Optional[bool] save_to_file: Optional[str] - def execute(self): + def execute(self) -> PartHolder[Notes]: ca_wrapper: CrowdAnkiJsonWrapper = self.ca_export.json_data if ca_wrapper.children: logging.warning("Child Decks / Sub-decks are not currently supported.") - nm_id_to_name: dict = {model.id: model.name for model in self.ca_export.note_models} - note_list = [self.ca_note_to_note(note, nm_id_to_name) for note in ca_wrapper.notes] + ca_models = self.ca_export.note_models + ca_notes = ca_wrapper.notes + + nm_id_to_name: dict = {model.id: model.name for model in ca_models} + note_list = [self.ca_note_to_note(note, nm_id_to_name) for note in ca_notes] notes = Notes.from_list_of_notes(note_list) # TODO: pass in sort method - PartHolder.override_or_create(self.part_id, self.save_to_file, notes) + return PartHolder.override_or_create(self.part_id, self.save_to_file, notes) @staticmethod def ca_note_to_note(note: dict, nm_id_to_name: dict) -> Note: diff --git a/brain_brew/build_tasks/crowd_anki/notes_to_crowd_anki.py b/brain_brew/build_tasks/crowd_anki/notes_to_crowd_anki.py index 90cd72b..3a249ce 100644 --- a/brain_brew/build_tasks/crowd_anki/notes_to_crowd_anki.py +++ b/brain_brew/build_tasks/crowd_anki/notes_to_crowd_anki.py @@ -25,6 +25,7 @@ def yamale_schema(cls) -> str: reverse_sort: bool(required=False) additional_items_to_add: map(str(), key=str(), required=False) override: include('{NotesOverride.task_name()}', required=False) + 
case_insensitive_sort: bool(required=False) ''' @classmethod @@ -38,27 +39,36 @@ class Representation(RepresentationBase): sort_order: Optional[List[str]] = field(default_factory=lambda: None) reverse_sort: Optional[bool] = field(default_factory=lambda: None) override: Optional[dict] = field(default_factory=lambda: None) + case_insensitive_sort: Optional[bool] = field(default_factory=lambda: None) @classmethod def from_repr(cls, data: Union[Representation, dict]): rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) return cls( + rep=rep, notes=PartHolder.from_file_manager(rep.part_id).part, sort_order=SharedBaseNotes._get_sort_order(rep.sort_order), reverse_sort=SharedBaseNotes._get_reverse_sort(rep.reverse_sort), additional_items_to_add=rep.additional_items_to_add or {}, - override=NotesOverride.from_repr(rep.override) if rep.override else None + override=NotesOverride.from_repr(rep.override) if rep.override else None, + case_insensitive_sort=rep.case_insensitive_sort if rep.case_insensitive_sort is not None else True ) + rep: Representation notes: Notes additional_items_to_add: dict sort_order: Optional[List[str]] = field(default_factory=lambda: None) reverse_sort: Optional[bool] = field(default_factory=lambda: None) override: Optional[NotesOverride] = field(default_factory=lambda: None) + case_insensitive_sort: bool = field(default=True) def execute(self, nm_name_to_id: dict) -> List[dict]: - notes = self.notes.get_sorted_notes_copy(sort_by_keys=self.sort_order, reverse_sort=self.reverse_sort) + notes = self.notes.get_sorted_notes_copy( + sort_by_keys=self.sort_order, + reverse_sort=self.reverse_sort, + case_insensitive_sort=self.case_insensitive_sort + ) if self.override: notes = [self.override.override(note) for note in notes] diff --git a/brain_brew/build_tasks/csvs/csvs_generate.py b/brain_brew/build_tasks/csvs/csvs_generate.py index ec8832a..a7d3092 100644 --- a/brain_brew/build_tasks/csvs/csvs_generate.py +++
b/brain_brew/build_tasks/csvs/csvs_generate.py @@ -1,8 +1,8 @@ -from dataclasses import dataclass, field +from dataclasses import dataclass from typing import List, Dict, Union from brain_brew.build_tasks.csvs.shared_base_csvs import SharedBaseCsvs -from brain_brew.configuration.build_config.build_task import TopLevelBuildTask +from brain_brew.commands.run_recipe.build_task import TopLevelBuildTask from brain_brew.configuration.part_holder import PartHolder from brain_brew.representation.yaml.notes import Notes, Note from brain_brew.transformers.file_mapping import FileMapping @@ -32,32 +32,45 @@ def yamale_schema(cls) -> str: # TODO: Use NotesOverride here, just as in Notes def yamale_dependencies(cls) -> set: return {NoteModelMapping, FileMapping} - notes: PartHolder[Notes] # TODO: Accept Multiple Note Parts - @dataclass class Representation(SharedBaseCsvs.Representation): notes: str + def encode(self): + return { + "notes": self.notes, + "file_mappings": [fm.encode() for fm in self.file_mappings], + "note_model_mappings": [nmm.encode() for nmm in self.note_model_mappings] + } + @classmethod def from_repr(cls, data: Union[Representation, dict]): rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) return cls( + rep=rep, notes=PartHolder.from_file_manager(rep.notes), file_mappings=rep.get_file_mappings(), - note_model_mappings=dict(*map(cls.map_nmm, rep.note_model_mappings)) + note_model_mappings={k: v for nm in rep.note_model_mappings for k, v in cls.map_nmm(nm).items()} ) + rep: Representation + notes: PartHolder[Notes] # TODO: Accept Multiple Note Parts + def execute(self): self.verify_contents() notes: List[Note] = self.notes.part.get_sorted_notes_copy( - sort_by_keys=[], reverse_sort=False, case_insensitive_sort=False) + sort_by_keys=[], + reverse_sort=False, + case_insensitive_sort=True + ) self.verify_notes_match_note_model_mappings(notes) - csv_data: List[dict] = [self.note_to_csv_row(note, 
self.note_model_mappings) for note in notes] - rows_by_guid = {row["guid"]: row for row in csv_data} - for fm in self.file_mappings: + for fm in self.file_mappings: + csv_data: List[dict] = [self.note_to_csv_row(note, self.note_model_mappings) for note in notes + if note.note_model in fm.get_used_note_model_names()] + rows_by_guid = {row["guid"]: row for row in csv_data} + fm.compile_data() fm.set_relevant_data(rows_by_guid) fm.write_file_on_close() diff --git a/brain_brew/build_tasks/csvs/generate_guids_in_csvs.py b/brain_brew/build_tasks/csvs/generate_guids_in_csvs.py index 5cd7c14..3df51c6 100644 --- a/brain_brew/build_tasks/csvs/generate_guids_in_csvs.py +++ b/brain_brew/build_tasks/csvs/generate_guids_in_csvs.py @@ -2,7 +2,7 @@ from dataclasses import dataclass from typing import List, Union -from brain_brew.configuration.build_config.build_task import TopLevelBuildTask +from brain_brew.commands.run_recipe.build_task import TopLevelBuildTask from brain_brew.configuration.representation_base import RepresentationBase from brain_brew.representation.generic.csv_file import CsvFile from brain_brew.utils import single_item_to_list, generate_anki_guid @@ -36,10 +36,12 @@ class Representation(RepresentationBase): def from_repr(cls, data: Union[Representation, dict]): rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) return cls( + rep=rep, sources=[CsvFile.create_or_get(csv) for csv in single_item_to_list(rep.source)], columns=rep.columns ) + rep: Representation sources: List[CsvFile] columns: List[str] diff --git a/brain_brew/build_tasks/csvs/notes_from_csvs.py b/brain_brew/build_tasks/csvs/notes_from_csvs.py index 723f93e..cb9eaa3 100644 --- a/brain_brew/build_tasks/csvs/notes_from_csvs.py +++ b/brain_brew/build_tasks/csvs/notes_from_csvs.py @@ -2,7 +2,7 @@ from typing import Dict, List, Union, Optional from brain_brew.build_tasks.csvs.shared_base_csvs import SharedBaseCsvs -from brain_brew.configuration.build_config.build_task import
BuildPartTask +from brain_brew.commands.run_recipe.build_task import BuildPartTask from brain_brew.configuration.part_holder import PartHolder from brain_brew.representation.yaml.notes import Note, Notes from brain_brew.transformers.file_mapping import FileMapping @@ -38,16 +38,26 @@ class Representation(SharedBaseCsvs.Representation): part_id: str save_to_file: Optional[str] = field(default=None) + def encode(self): + return { + "part_id": self.part_id, + "save_to_file": self.save_to_file, + "file_mappings": [fm.encode() for fm in self.file_mappings], + "note_model_mappings": [nmm.encode() for nmm in self.note_model_mappings] + } + @classmethod def from_repr(cls, data: Union[Representation, dict]): rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) return cls( + rep=rep, part_id=rep.part_id, save_to_file=rep.save_to_file, file_mappings=rep.get_file_mappings(), - note_model_mappings=dict(*map(cls.map_nmm, rep.note_model_mappings)) + note_model_mappings={k: v for nm in rep.note_model_mappings for k, v in cls.map_nmm(nm).items()} ) + rep: Representation part_id: str save_to_file: Optional[str] diff --git a/brain_brew/build_tasks/csvs/shared_base_csvs.py b/brain_brew/build_tasks/csvs/shared_base_csvs.py index 87faf2b..916b61d 100644 --- a/brain_brew/build_tasks/csvs/shared_base_csvs.py +++ b/brain_brew/build_tasks/csvs/shared_base_csvs.py @@ -1,5 +1,5 @@ import logging -from dataclasses import dataclass, field +from dataclasses import dataclass from typing import List, Dict from brain_brew.configuration.representation_base import RepresentationBase @@ -49,14 +49,15 @@ def verify_contents(self): note_model_names = cfm.get_used_note_model_names() available_columns = cfm.get_available_columns() - referenced_note_models_maps = [value for key, value in self.note_model_mappings.items() if - key in note_model_names] + referenced_note_models_maps = [value for key, value in self.note_model_mappings.items() + if key in 
note_model_names] for nm_map in referenced_note_models_maps: for holder in nm_map.note_models.values(): - missing_columns = [col for col in holder.part.field_names_lowercase if - col not in nm_map.csv_headers_map_to_note_fields(available_columns)] - if missing_columns: - logging.warning(f"Csvs are missing columns from {holder.part_id} {missing_columns}") + if holder.part.name in note_model_names: + missing_columns = [col for col in holder.part.field_names_lowercase if + col not in nm_map.csv_headers_map_to_note_fields(available_columns)] + if missing_columns: + logging.warning(f"Csvs are missing columns from {holder.part_id} {missing_columns}") if errors: raise Exception(errors) diff --git a/brain_brew/build_tasks/deck_parts/from_yaml_part.py b/brain_brew/build_tasks/deck_parts/from_yaml_part.py index fc00d89..a87b54d 100644 --- a/brain_brew/build_tasks/deck_parts/from_yaml_part.py +++ b/brain_brew/build_tasks/deck_parts/from_yaml_part.py @@ -2,11 +2,10 @@ from dataclasses import dataclass from typing import Union -from brain_brew.configuration.build_config.build_task import BuildPartTask +from brain_brew.commands.run_recipe.build_task import BuildPartTask from brain_brew.configuration.part_holder import PartHolder from brain_brew.configuration.representation_base import RepresentationBase from brain_brew.representation.yaml.media_group import MediaGroup -from brain_brew.representation.yaml.note_model import NoteModel from brain_brew.representation.yaml.notes import Notes from brain_brew.representation.yaml.yaml_object import YamlObject @@ -32,6 +31,7 @@ def from_repr(cls, data: Union[Representation, dict]): rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) return cls( + rep=rep, part=PartHolder.override_or_create( part_id=rep.part_id, save_to_file=None, part=cls.part_type.from_yaml_file(rep.file)) ) @@ -39,6 +39,7 @@ def from_repr(cls, data: Union[Representation, dict]): def execute(self): pass + rep: 
Representation part: YamlObject @@ -51,19 +52,6 @@ def task_name(cls) -> str: part_type = Notes -@dataclass -class NoteModelsFromYamlPart(FromYamlPartBase, BuildPartTask): - @classmethod - def task_name(cls) -> str: - return r'note_models_from_yaml_part' - - @classmethod - def task_regex(cls) -> str: - return r'note_model[s]?_from_yaml_part' - - part_type = NoteModel - - @dataclass class MediaGroupFromYamlPart(FromYamlPartBase, BuildPartTask): @classmethod diff --git a/brain_brew/build_tasks/deck_parts/headers_from_yaml_part.py b/brain_brew/build_tasks/deck_parts/headers_from_yaml_part.py index 477f361..546e998 100644 --- a/brain_brew/build_tasks/deck_parts/headers_from_yaml_part.py +++ b/brain_brew/build_tasks/deck_parts/headers_from_yaml_part.py @@ -1,9 +1,8 @@ -from dataclasses import dataclass -from typing import Union +from dataclasses import dataclass, field +from typing import Union, Optional -from brain_brew.build_tasks.deck_parts.from_yaml_part import FromYamlPartBase from brain_brew.build_tasks.overrides.headers_override import HeadersOverride -from brain_brew.configuration.build_config.build_task import BuildPartTask +from brain_brew.commands.run_recipe.build_task import BuildPartTask from brain_brew.configuration.part_holder import PartHolder from brain_brew.configuration.representation_base import RepresentationBase from brain_brew.representation.yaml.headers import Headers @@ -35,23 +34,33 @@ class Representation(RepresentationBase): part_id: str file: str - override: dict + override: Optional[dict] = field(default_factory=lambda: None) + + def encode(self): + d = { + "part_id": self.part_id, + "file": self.file + } + if self.override: + d.setdefault("override", self.override) + return d @classmethod def from_repr(cls, data: Union[Representation, dict]): rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) return cls( + rep=rep,
headers=PartHolder.override_or_create( part_id=rep.part_id, save_to_file=None, part=Headers.from_yaml_file(rep.file) ).part, - override=HeadersOverride.from_repr(rep.override) + override=HeadersOverride.from_repr(rep.override) if rep.override else None ) + rep: Representation headers: Headers - override: HeadersOverride + override: Optional[HeadersOverride] def execute(self): if self.override: diff --git a/brain_brew/build_tasks/deck_parts/media_group_from_folder.py b/brain_brew/build_tasks/deck_parts/media_group_from_folder.py index 33915c8..4df8644 100644 --- a/brain_brew/build_tasks/deck_parts/media_group_from_folder.py +++ b/brain_brew/build_tasks/deck_parts/media_group_from_folder.py @@ -1,11 +1,11 @@ from dataclasses import dataclass, field from typing import Optional, Union, List -from brain_brew.configuration.build_config.build_task import BuildPartTask +from brain_brew.commands.run_recipe.build_task import BuildPartTask from brain_brew.configuration.part_holder import PartHolder from brain_brew.configuration.representation_base import RepresentationBase from brain_brew.representation.yaml.media_group import MediaGroup -from brain_brew.transformers.media_group_from_location import create_media_group_from_location +from brain_brew.transformers.create_media_group_from_location import create_media_group_from_location @dataclass @@ -18,8 +18,8 @@ def task_name(cls) -> str: def yamale_schema(cls) -> str: return f'''\ part_id: str() - save_to_file: str(required=False) source: str() + save_to_file: str(required=False) recursive: bool(required=False) filter_whitelist_from_parts: list(str(), required=False) filter_blacklist_from_parts: list(str(), required=False) @@ -27,8 +27,8 @@ def yamale_schema(cls) -> str: @dataclass class Representation(RepresentationBase): - source: str part_id: str + source: str filter_blacklist_from_parts: List[str] = field(default_factory=list) filter_whitelist_from_parts: List[str] = field(default_factory=list) recursive: Optional[bool] = 
field(default=True) @@ -38,6 +38,7 @@ class Representation(RepresentationBase): def from_repr(cls, data: Union[Representation, dict]): rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) return cls( + rep=rep, part=create_media_group_from_location( part_id=rep.part_id, save_to_file=rep.save_to_file, @@ -50,6 +51,7 @@ def from_repr(cls, data: Union[Representation, dict]): ) ) + rep: Representation part: MediaGroup def execute(self): diff --git a/brain_brew/build_tasks/deck_parts/note_model_from_html_parts.py b/brain_brew/build_tasks/deck_parts/note_model_from_html_parts.py index 18f0b38..cff5da2 100644 --- a/brain_brew/build_tasks/deck_parts/note_model_from_html_parts.py +++ b/brain_brew/build_tasks/deck_parts/note_model_from_html_parts.py @@ -1,7 +1,7 @@ from dataclasses import dataclass, field from typing import Optional, Union, List -from brain_brew.configuration.build_config.build_task import BuildPartTask +from brain_brew.commands.run_recipe.build_task import BuildPartTask from brain_brew.configuration.part_holder import PartHolder from brain_brew.configuration.representation_base import RepresentationBase from brain_brew.representation.generic.html_file import HTMLFile @@ -46,6 +46,7 @@ class Representation(RepresentationBase): def from_repr(cls, data: Union[Representation, dict]): rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) return cls( + rep=rep, part_id=rep.part_id, model_id=rep.model_id, css=HTMLFile.create_or_get(rep.css_file).get_data(deep_copy=True), @@ -55,6 +56,7 @@ def from_repr(cls, data: Union[Representation, dict]): save_to_file=rep.save_to_file ) + rep: Representation part_id: str model_id: str css: str diff --git a/brain_brew/build_tasks/deck_parts/note_model_from_yaml_part.py b/brain_brew/build_tasks/deck_parts/note_model_from_yaml_part.py new file mode 100644 index 0000000..de6f387 --- /dev/null +++ 
b/brain_brew/build_tasks/deck_parts/note_model_from_yaml_part.py @@ -0,0 +1,45 @@ +from dataclasses import dataclass +from typing import Union + +from brain_brew.build_tasks.deck_parts.from_yaml_part import FromYamlPartBase +from brain_brew.commands.run_recipe.build_task import BuildPartTask +from brain_brew.configuration.part_holder import PartHolder +from brain_brew.configuration.representation_base import RepresentationBase +from brain_brew.representation.yaml.note_model import NoteModel + + +@dataclass +class NoteModelsFromYamlPart(FromYamlPartBase, BuildPartTask): + @classmethod + def task_name(cls) -> str: + return r'note_models_from_yaml_part' + + @classmethod + def task_regex(cls) -> str: + return r'note_model[s]?_from_yaml_part' + + @classmethod + def yamale_schema(cls) -> str: + return f'''\ + part_id: str() + file: str() + ''' + + @dataclass + class Representation(RepresentationBase): + part_id: str + file: str + # TODO: Overrides + + @classmethod + def from_repr(cls, data: Union[Representation, dict]): + rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) + + return cls( + rep=rep, + part=PartHolder.override_or_create( + part_id=rep.part_id, save_to_file=None, part=NoteModel.from_yaml_file(rep.file)) + ) + + def execute(self): + pass diff --git a/brain_brew/build_tasks/deck_parts/note_model_template_from_html_files.py b/brain_brew/build_tasks/deck_parts/note_model_template_from_html_files.py deleted file mode 100644 index 70530e6..0000000 --- a/brain_brew/build_tasks/deck_parts/note_model_template_from_html_files.py +++ /dev/null @@ -1,88 +0,0 @@ -from dataclasses import dataclass, field -from typing import Optional - -from brain_brew.configuration.build_config.build_task import BuildPartTask -from brain_brew.configuration.part_holder import PartHolder -from brain_brew.configuration.representation_base import RepresentationBase -from brain_brew.representation.generic.html_file import HTMLFile 
-from brain_brew.representation.yaml.note_model_template import Template - -html_separator = '\n\n--\n\n' - - -@dataclass -class TemplateFromHTML(BuildPartTask): - @classmethod - def task_name(cls) -> str: - return r'note_model_template_from_html' - - @classmethod - def task_regex(cls) -> str: - return r'note_model_template[s]?_from_html' - - @classmethod - def yamale_schema(cls) -> str: - return f"""\ - part_id: str() - html_file: str() - template_name: str(required=False) - browser_html_file: str(required=False) - deck_override_id: int(required=False) - save_to_file: str(required=False) - """ - - @dataclass - class Representation(RepresentationBase): - part_id: str - html_file: str - template_name: Optional[str] = field(default=None) - browser_html_file: Optional[str] = field(default=None) - deck_override_id: Optional[int] = field(default=None) - save_to_file: Optional[str] = field(default=None) - - @classmethod - def from_repr(cls, data: dict): - rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) - return cls( - part_id=rep.part_id, - template_name=rep.template_name or rep.part_id, - html_file=HTMLFile.create_or_get(rep.html_file), - browser_html_file=HTMLFile.create_or_get(rep.browser_html_file) if rep.browser_html_file else None, - deck_override_id=rep.deck_override_id, - save_to_file=rep.save_to_file - ) - - part_id: str - template_name: str - html_file: HTMLFile - browser_html_file: Optional[HTMLFile] - deck_override_id: Optional[int] - save_to_file: Optional[str] - - def execute(self): - main_data = self.html_file.get_data(deep_copy=True) - browser_data = self.browser_html_file.get_data(deep_copy=True) if self.browser_html_file else None - - if html_separator not in main_data: - raise ValueError(f"Cannot find separator {html_separator} in html file '{self.html_file.file_location}'") - - front, back = tuple(main_data.split(html_separator, maxsplit=1)) - - if browser_data: - if html_separator not in 
browser_data: - raise ValueError(f"Cannot find separator {html_separator} in html file '{self.browser_html_file.file_location}'") - - browser_front, browser_back = tuple(browser_data.split(html_separator, maxsplit=1)) - else: - browser_front = browser_back = "" - - template = Template( - name=self.template_name, - question_format=front, - answer_format=back, - question_format_in_browser=browser_front, - answer_format_in_browser=browser_back, - deck_override_id=self.deck_override_id - ) - - PartHolder.override_or_create(self.part_id, self.save_to_file, template) diff --git a/brain_brew/build_tasks/deck_parts/media_group_to_folder.py b/brain_brew/build_tasks/deck_parts/save_media_group_to_folder.py similarity index 87% rename from brain_brew/build_tasks/deck_parts/media_group_to_folder.py rename to brain_brew/build_tasks/deck_parts/save_media_group_to_folder.py index 4e3b1d9..13fae21 100644 --- a/brain_brew/build_tasks/deck_parts/media_group_to_folder.py +++ b/brain_brew/build_tasks/deck_parts/save_media_group_to_folder.py @@ -1,15 +1,15 @@ from dataclasses import dataclass, field from typing import List, Union, Optional -from brain_brew.configuration.build_config.build_task import BuildPartTask +from brain_brew.commands.run_recipe.build_task import TopLevelBuildTask from brain_brew.configuration.part_holder import PartHolder from brain_brew.configuration.representation_base import RepresentationBase from brain_brew.representation.yaml.media_group import MediaGroup -from brain_brew.transformers.media_group_save_to_location import save_media_groups_to_location +from brain_brew.transformers.save_media_group_to_location import save_media_groups_to_location @dataclass -class MediaGroupsToFolder(BuildPartTask): +class SaveMediaGroupsToFolder(TopLevelBuildTask): @classmethod def task_name(cls) -> str: return r'save_media_groups_to_folder' @@ -38,12 +38,14 @@ class Representation(RepresentationBase): def from_repr(cls, data: Union[Representation, dict]): rep: 
cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) return cls( + rep=rep, parts=list(holder.part for holder in map(PartHolder.from_file_manager, rep.parts)), folder=rep.folder, clear_folder=rep.clear_folder or False, recursive=rep.recursive or False ) + rep: Representation parts: List[MediaGroup] folder: str clear_folder: bool diff --git a/brain_brew/build_tasks/deck_parts/save_note_models_to_folder.py b/brain_brew/build_tasks/deck_parts/save_note_models_to_folder.py new file mode 100644 index 0000000..a4f1b0d --- /dev/null +++ b/brain_brew/build_tasks/deck_parts/save_note_models_to_folder.py @@ -0,0 +1,57 @@ +from dataclasses import dataclass, field +from typing import List, Union, Optional, Dict + +from brain_brew.commands.run_recipe.build_task import TopLevelBuildTask +from brain_brew.configuration.part_holder import PartHolder +from brain_brew.configuration.representation_base import RepresentationBase +from brain_brew.representation.yaml.note_model import NoteModel +from brain_brew.transformers.save_note_model_to_location import save_note_model_to_location + + +@dataclass +class SaveNoteModelsToFolder(TopLevelBuildTask): + @classmethod + def task_name(cls) -> str: + return r'save_note_models_to_folder' + + @classmethod + def task_regex(cls) -> str: + return r"save_note_model[s]?_to_folder" + + @classmethod + def yamale_schema(cls) -> str: + return f'''\ + parts: list(str()) + folder: str() + clear_existing: bool(required=False) + ''' + + @dataclass + class Representation(RepresentationBase): + parts: List[str] + folder: str + clear_existing: Optional[bool] = field(default=None) + + @classmethod + def from_repr(cls, data: Union[Representation, dict]): + rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) + return cls( + rep=rep, + parts=list(holder.part for holder in map(PartHolder.from_file_manager, rep.parts)), + folder=rep.folder,
clear_existing=rep.clear_existing or False, + ) + + rep: Representation + parts: List[NoteModel] + folder: str + clear_existing: bool + + def execute(self) -> Dict[str, str]: + model_yaml_files: Dict[str, str] = {} + for model in self.parts: + model_yaml_files.setdefault( + model.name, + save_note_model_to_location(model, self.folder, self.clear_existing) + ) + return model_yaml_files diff --git a/brain_brew/build_tasks/overrides/headers_override.py b/brain_brew/build_tasks/overrides/headers_override.py index e97d349..edc6e39 100644 --- a/brain_brew/build_tasks/overrides/headers_override.py +++ b/brain_brew/build_tasks/overrides/headers_override.py @@ -27,9 +27,11 @@ class Representation(RepresentationBase): def from_repr(cls, data: Union[Representation, dict]): rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) return cls( + rep=rep, deck_desc_html_file=HTMLFile.create_or_get(rep.deck_description_html_file) ) + rep: Representation deck_desc_html_file: Optional[HTMLFile] def override(self, header: Headers): diff --git a/brain_brew/build_tasks/overrides/notes_override.py b/brain_brew/build_tasks/overrides/notes_override.py index afe00c9..8ce4604 100644 --- a/brain_brew/build_tasks/overrides/notes_override.py +++ b/brain_brew/build_tasks/overrides/notes_override.py @@ -3,7 +3,7 @@ from brain_brew.configuration.representation_base import RepresentationBase from brain_brew.interfaces.yamale_verifyable import YamlRepr -from brain_brew.representation.yaml.notes import Notes, Note +from brain_brew.representation.yaml.notes import Note @dataclass @@ -26,9 +26,11 @@ class Representation(RepresentationBase): def from_repr(cls, data: Union[Representation, dict]): rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) return cls( + rep=rep, note_model=rep.note_model ) + rep: Representation note_model: Optional[str] def override(self, note: Note): diff --git 
a/brain_brew/configuration/build_config/__init__.py b/brain_brew/commands/__init__.py similarity index 100% rename from brain_brew/configuration/build_config/__init__.py rename to brain_brew/commands/__init__.py diff --git a/brain_brew/commands/argument_reader.py b/brain_brew/commands/argument_reader.py new file mode 100644 index 0000000..5ee294e --- /dev/null +++ b/brain_brew/commands/argument_reader.py @@ -0,0 +1,100 @@ +from enum import Enum + +import sys +from argparse import ArgumentParser + +from brain_brew.front_matter import latest_version_number +from brain_brew.commands.init_repo.init_repo import InitRepo +from brain_brew.commands.run_recipe.run_recipe import RunRecipe +from brain_brew.interfaces.command import Command + + +class Commands(Enum): + RUN_RECIPE = "run" + INIT_REPO = "init" + + +class BBArgumentReader(ArgumentParser): + def __init__(self, test_mode=False): + super().__init__( + prog="brainbrew", + description='Manage Flashcards by transforming them to various types.' + ) + + self._set_parser_arguments() + + if not test_mode and len(sys.argv) == 1: + self.print_help(sys.stderr) + sys.exit(1) + + def _set_parser_arguments(self): + + subparsers = self.add_subparsers(parser_class=ArgumentParser, help='Commands that can be run', dest="command") + + parser_run = subparsers.add_parser( + Commands.RUN_RECIPE.value, + help="Run a recipe file. This will convert some data to another format, based on the instructions in the recipe file." + ) + parser_run.add_argument( + "recipe", + metavar="recipe", + type=str, + help="Yaml file to use as the recipe" + ) + parser_run.add_argument( + "--verify", "-v", + action="store_true", + dest="verify_only", + default=False, + help="Only verify the recipe contents, without running it." + ) + + parser_init = subparsers.add_parser( + Commands.INIT_REPO.value, + help="Initialise a Brain Brew repository, using a CrowdAnki export as the base data." 
+ ) + parser_init.add_argument( + "crowdanki_folder", + metavar="crowdanki_folder", + type=str, + help="The folder that stores the CrowdAnki files to build this repo from" + ) + + def get_parsed(self, override_args=None) -> Command: + parsed_args = self.parse_args(args=override_args) + + if parsed_args.command == Commands.RUN_RECIPE.value: + # Required + recipe = self.error_if_blank(parsed_args.recipe) + + # Optional + verify_only = parsed_args.verify_only + + return RunRecipe( + recipe_file_name=recipe, + verify_only=verify_only + ) + + if parsed_args.command == Commands.INIT_REPO.value: + # Required + crowdanki_folder = parsed_args.crowdanki_folder + + return InitRepo( + crowdanki_folder=crowdanki_folder + ) + + raise KeyError("Unknown Command") + + def error_if_blank(self, arg): + if arg == "" or arg is None: + self.error("Required argument missing") + return arg + + def error(self, message): + sys.stderr.write('error: %s\n' % message) + self.print_help() + sys.exit(2) + + def print_help(self, message=None): + print(f"Brain Brew v{latest_version_number()}") + super().print_help(message) diff --git a/brain_brew/commands/init_repo/__init__.py b/brain_brew/commands/init_repo/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/brain_brew/commands/init_repo/init_repo.py b/brain_brew/commands/init_repo/init_repo.py new file mode 100644 index 0000000..1226f66 --- /dev/null +++ b/brain_brew/commands/init_repo/init_repo.py @@ -0,0 +1,206 @@ +import os +from dataclasses import dataclass +from typing import List + +from brain_brew.build_tasks.crowd_anki.crowd_anki_generate import CrowdAnkiGenerate +from brain_brew.build_tasks.crowd_anki.headers_from_crowdanki import HeadersFromCrowdAnki +from brain_brew.build_tasks.crowd_anki.headers_to_crowd_anki import HeadersToCrowdAnki +from brain_brew.build_tasks.crowd_anki.media_group_from_crowd_anki import MediaGroupFromCrowdAnki +from brain_brew.build_tasks.crowd_anki.media_group_to_crowd_anki import 
MediaGroupToCrowdAnki +from brain_brew.build_tasks.crowd_anki.note_models_all_from_crowd_anki import NoteModelsAllFromCrowdAnki +from brain_brew.build_tasks.crowd_anki.note_models_to_crowd_anki import NoteModelsToCrowdAnki +from brain_brew.build_tasks.crowd_anki.notes_from_crowd_anki import NotesFromCrowdAnki +from brain_brew.build_tasks.crowd_anki.notes_to_crowd_anki import NotesToCrowdAnki +from brain_brew.build_tasks.csvs.csvs_generate import CsvsGenerate +from brain_brew.build_tasks.csvs.generate_guids_in_csvs import GenerateGuidsInCsvs +from brain_brew.build_tasks.csvs.notes_from_csvs import NotesFromCsvs +from brain_brew.build_tasks.deck_parts.headers_from_yaml_part import HeadersFromYamlPart +from brain_brew.build_tasks.deck_parts.media_group_from_folder import MediaGroupFromFolder +from brain_brew.build_tasks.deck_parts.note_model_from_yaml_part import NoteModelsFromYamlPart +from brain_brew.build_tasks.deck_parts.save_media_group_to_folder import SaveMediaGroupsToFolder +from brain_brew.build_tasks.deck_parts.save_note_models_to_folder import SaveNoteModelsToFolder +from brain_brew.commands.run_recipe.build_task import TopLevelBuildTask, BuildPartTask +from brain_brew.commands.run_recipe.parts_builder import PartsBuilder +from brain_brew.commands.run_recipe.top_level_builder import TopLevelBuilder +from brain_brew.interfaces.command import Command +from brain_brew.representation.generic.csv_file import CsvFile +from brain_brew.representation.yaml.note_model import NoteModel +from brain_brew.representation.yaml.yaml_object import YamlObject +from brain_brew.transformers.file_mapping import FileMapping +from brain_brew.transformers.note_model_mapping import NoteModelMapping +from brain_brew.utils import create_path_if_not_exists, filename_from_full_path, folder_name_from_full_path + +RECIPE_MEDIA = "deck_media" +RECIPE_HEADERS = "deck_headers" +RECIPE_NOTES = "deck_notes" + +LOC_RECIPES = "recipes/" +LOC_BUILD = "build/" +LOC_DATA = "src/data/" +LOC_HEADERS 
= "src/headers/" +LOC_NOTE_MODELS = "src/note_models/" +LOC_MEDIA = "src/media/" + + +@dataclass +class InitRepo(Command): + crowdanki_folder: str + + def execute(self): + self.setup_repo_structure() + + # Create the Deck Parts used + headers_ca, note_models_all_ca, notes_ca, media_group_ca = self.parts_from_crowdanki(self.crowdanki_folder) + + headers = headers_ca.execute().part + headers_name = LOC_HEADERS + "header1.yaml" + headers.dump_to_yaml(headers_name) + # TODO: desc file + + note_models = [m.part for m in note_models_all_ca.execute()] + + notes = notes_ca.execute().part + used_note_models_in_notes = notes.get_all_known_note_model_names() + + media_group_ca.execute() + + note_model_mappings = [NoteModelMapping.Representation([model.name for model in note_models])] + file_mappings: List[FileMapping.Representation] = [] + + csv_files = [] + + for model in note_models: + if model.name in used_note_models_in_notes: + csv_file_path = os.path.join(LOC_DATA, CsvFile.to_filename_csv(model.name)) + column_headers = ["guid"] + model.field_names_lowercase + ["tags"] + CsvFile.create_file_with_headers(csv_file_path, column_headers) + + file_mappings.append(FileMapping.Representation( + file=csv_file_path, + note_model=model.name + )) + + csv_files.append(csv_file_path) + + deck_path = os.path.join(LOC_BUILD, folder_name_from_full_path(self.crowdanki_folder)) + + # Generate the Source files that will be kept in the repo + save_note_models_to_folder = SaveNoteModelsToFolder.from_repr(SaveNoteModelsToFolder.Representation( + [m.name for m in note_models], LOC_NOTE_MODELS, True + )) + model_name_to_file_dict = save_note_models_to_folder.execute() + + save_media_to_folder = SaveMediaGroupsToFolder.from_repr(SaveMediaGroupsToFolder.Representation( + parts=[RECIPE_MEDIA], folder=LOC_MEDIA, recursive=True, clear_folder=True + )) + save_media_to_folder.execute() + + generate_csvs = CsvsGenerate.from_repr({ + 'notes': RECIPE_NOTES, + 'note_model_mappings': note_model_mappings, 
+ 'file_mappings': file_mappings + }) + generate_csvs.execute() + + # Create Recipes + + # Anki to Source + headers_recipe, note_models_all_recipe, notes_recipe, media_group_recipe = self.parts_from_crowdanki(deck_path) + + build_part_tasks: List[BuildPartTask] = [ + headers_recipe, + notes_recipe, + note_models_all_recipe, + media_group_recipe, + ] + dp_builder = PartsBuilder(build_part_tasks) + + top_level_tasks: List[TopLevelBuildTask] = [dp_builder, save_media_to_folder, generate_csvs] + self.create_yaml_from_top_level(top_level_tasks, os.path.join(LOC_RECIPES, "anki_to_source")) + + # Source to Anki + note_models_from_yaml = [ + NoteModelsFromYamlPart.from_repr(NoteModelsFromYamlPart.Representation(name, file)) + for name, file in model_name_to_file_dict.items() + ] + + media_group_from_folder = MediaGroupFromFolder.from_repr(MediaGroupFromFolder.Representation( + part_id=RECIPE_MEDIA, source=LOC_MEDIA, recursive=True + )) + + headers_from_yaml = HeadersFromYamlPart.from_repr(HeadersFromYamlPart.Representation( + part_id=RECIPE_HEADERS, file=headers_name + )) + + notes_from_csv = NotesFromCsvs.from_repr({ + 'part_id': RECIPE_NOTES, + 'note_model_mappings': note_model_mappings, + 'file_mappings': file_mappings + }) + + build_part_tasks: List[BuildPartTask] = note_models_from_yaml + [ + headers_from_yaml, + notes_from_csv, + media_group_from_folder, + ] + dp_builder = PartsBuilder(build_part_tasks) + + generate_guids_in_csv = GenerateGuidsInCsvs.from_repr(GenerateGuidsInCsvs.Representation( + source=csv_files, columns=["guid"] + )) + + generate_crowdanki = CrowdAnkiGenerate.from_repr(CrowdAnkiGenerate.Representation( + folder=deck_path, + notes=NotesToCrowdAnki.Representation( + part_id=RECIPE_NOTES + ).encode(), + headers=RECIPE_HEADERS, + media=MediaGroupToCrowdAnki.Representation( + parts=[RECIPE_MEDIA] + ).encode(), + note_models=NoteModelsToCrowdAnki.Representation( + parts=[NoteModelsToCrowdAnki.NoteModelListItem.Representation(name).encode() + for name, 
file in model_name_to_file_dict.items()] + ).encode() + )) + + top_level_tasks: List[TopLevelBuildTask] = [generate_guids_in_csv, dp_builder, generate_crowdanki] + source_to_anki_path = os.path.join(LOC_RECIPES, "source_to_anki") + self.create_yaml_from_top_level(top_level_tasks, source_to_anki_path) + + print(f"\nRepo Init complete. You should now run `brainbrew run {source_to_anki_path}`") + + @staticmethod + def create_yaml_from_top_level(top_tasks: List[TopLevelBuildTask], filepath: str): + tl_builder = TopLevelBuilder(top_tasks) + + encoded_top_level_tasks = tl_builder.encode() + # print(encoded_top_level_tasks) + + model_yaml_file_name = YamlObject.to_filename_yaml(filepath) + YamlObject.dump_to_yaml_file(model_yaml_file_name, encoded_top_level_tasks) + + @staticmethod + def parts_from_crowdanki(folder: str): + headers_ca = HeadersFromCrowdAnki.from_repr(HeadersFromCrowdAnki.Representation( + source=folder, part_id=RECIPE_HEADERS + )) + note_models_all_ca = NoteModelsAllFromCrowdAnki.from_repr(NoteModelsAllFromCrowdAnki.Representation( + source=folder + )) + notes_ca = NotesFromCrowdAnki.from_repr(NotesFromCrowdAnki.Representation( + source=folder, part_id=RECIPE_NOTES + )) + media_group_ca = MediaGroupFromCrowdAnki.from_repr(MediaGroupFromFolder.Representation( + source=folder, part_id=RECIPE_MEDIA + )) + return headers_ca, note_models_all_ca, notes_ca, media_group_ca + + @staticmethod + def setup_repo_structure(): + create_path_if_not_exists(LOC_RECIPES) + create_path_if_not_exists(LOC_BUILD) + create_path_if_not_exists(LOC_DATA) + create_path_if_not_exists(LOC_HEADERS) + create_path_if_not_exists(LOC_NOTE_MODELS) + create_path_if_not_exists(LOC_MEDIA) diff --git a/brain_brew/commands/run_recipe/__init__.py b/brain_brew/commands/run_recipe/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/brain_brew/configuration/build_config/build_task.py b/brain_brew/commands/run_recipe/build_task.py similarity index 83% rename from 
brain_brew/configuration/build_config/build_task.py rename to brain_brew/commands/run_recipe/build_task.py index d449020..a95d128 100644 --- a/brain_brew/configuration/build_config/build_task.py +++ b/brain_brew/commands/run_recipe/build_task.py @@ -1,12 +1,17 @@ from abc import ABCMeta, abstractmethod -from typing import Dict, Type, Set +from typing import Dict, Type, Set, Optional +from brain_brew.configuration.representation_base import RepresentationBase from brain_brew.interfaces.yamale_verifyable import YamlRepr class BuildTask(YamlRepr, object, metaclass=ABCMeta): execute_immediately: bool = False accepts_list_of_self: bool = True + rep: Optional[RepresentationBase] + + def encode_rep(self) -> Dict[str, any]: + return self.rep.encode() @abstractmethod def execute(self): diff --git a/brain_brew/configuration/build_config/parts_builder.py b/brain_brew/commands/run_recipe/parts_builder.py similarity index 68% rename from brain_brew/configuration/build_config/parts_builder.py rename to brain_brew/commands/run_recipe/parts_builder.py index 8406de9..f46b6e3 100644 --- a/brain_brew/configuration/build_config/parts_builder.py +++ b/brain_brew/commands/run_recipe/parts_builder.py @@ -3,22 +3,22 @@ from brain_brew.build_tasks.crowd_anki.headers_from_crowdanki import HeadersFromCrowdAnki from brain_brew.build_tasks.crowd_anki.media_group_from_crowd_anki import MediaGroupFromCrowdAnki -from brain_brew.build_tasks.crowd_anki.note_models_from_crowd_anki import NoteModelsFromCrowdAnki +from brain_brew.build_tasks.crowd_anki.note_model_single_from_crowd_anki import NoteModelSingleFromCrowdAnki +from brain_brew.build_tasks.crowd_anki.note_models_all_from_crowd_anki import NoteModelsAllFromCrowdAnki from brain_brew.build_tasks.crowd_anki.notes_from_crowd_anki import NotesFromCrowdAnki from brain_brew.build_tasks.csvs.notes_from_csvs import NotesFromCsvs -from brain_brew.build_tasks.deck_parts.from_yaml_part import NotesFromYamlPart, NoteModelsFromYamlPart, \ - 
MediaGroupFromYamlPart +from brain_brew.build_tasks.deck_parts.from_yaml_part import NotesFromYamlPart, MediaGroupFromYamlPart +from brain_brew.build_tasks.deck_parts.note_model_from_yaml_part import NoteModelsFromYamlPart from brain_brew.build_tasks.deck_parts.headers_from_yaml_part import HeadersFromYamlPart from brain_brew.build_tasks.deck_parts.media_group_from_folder import MediaGroupFromFolder -from brain_brew.build_tasks.deck_parts.media_group_to_folder import MediaGroupsToFolder from brain_brew.build_tasks.deck_parts.note_model_from_html_parts import NoteModelFromHTMLParts -from brain_brew.build_tasks.deck_parts.note_model_template_from_html_files import TemplateFromHTML -from brain_brew.configuration.build_config.build_task import BuildTask, BuildPartTask, TopLevelBuildTask -from brain_brew.configuration.build_config.recipe_builder import RecipeBuilder +from brain_brew.commands.run_recipe.build_task import BuildTask, BuildPartTask, TopLevelBuildTask +from brain_brew.commands.run_recipe.recipe_builder import RecipeBuilder @dataclass class PartsBuilder(RecipeBuilder, TopLevelBuildTask): + tasks: List[BuildPartTask] accepts_list_of_self: bool = False @classmethod @@ -42,6 +42,9 @@ def from_repr(cls, data: List[dict]): def encode(self) -> dict: pass + def encode_rep(self) -> list: + return self.tasks_to_encoded() + @classmethod def from_yaml_file(cls, filename: str): pass @@ -55,7 +58,7 @@ def yamale_dependencies(cls) -> Set[Type[BuildPartTask]]: return { NotesFromCsvs, NotesFromYamlPart, HeadersFromYamlPart, NoteModelsFromYamlPart, MediaGroupFromYamlPart, - MediaGroupFromFolder, MediaGroupsToFolder, - NoteModelFromHTMLParts, TemplateFromHTML, - HeadersFromCrowdAnki, MediaGroupFromCrowdAnki, NoteModelsFromCrowdAnki, NotesFromCrowdAnki + MediaGroupFromFolder, + NoteModelFromHTMLParts, NoteModelsFromYamlPart, NoteModelSingleFromCrowdAnki, NoteModelsAllFromCrowdAnki, + HeadersFromCrowdAnki, MediaGroupFromCrowdAnki, NotesFromCrowdAnki } diff --git 
a/brain_brew/configuration/build_config/recipe_builder.py b/brain_brew/commands/run_recipe/recipe_builder.py similarity index 93% rename from brain_brew/configuration/build_config/recipe_builder.py rename to brain_brew/commands/run_recipe/recipe_builder.py index 7a95f67..a4d2f87 100644 --- a/brain_brew/configuration/build_config/recipe_builder.py +++ b/brain_brew/commands/run_recipe/recipe_builder.py @@ -4,7 +4,7 @@ from textwrap import indent from typing import Dict, List, Type, Set -from brain_brew.configuration.build_config.build_task import BuildTask +from brain_brew.commands.run_recipe.build_task import BuildTask from brain_brew.representation.yaml.yaml_object import YamlObject @@ -12,6 +12,9 @@ class RecipeBuilder(YamlObject, metaclass=ABCMeta): tasks: List[BuildTask] + def tasks_to_encoded(self) -> list: + return [{task.task_name(): task.encode_rep()} for task in self.tasks] + @classmethod def from_list(cls, data: List[dict]): tasks = cls.read_tasks(data) diff --git a/brain_brew/commands/run_recipe/run_recipe.py b/brain_brew/commands/run_recipe/run_recipe.py new file mode 100644 index 0000000..0ed18f7 --- /dev/null +++ b/brain_brew/commands/run_recipe/run_recipe.py @@ -0,0 +1,18 @@ +from dataclasses import dataclass +from brain_brew.interfaces.command import Command +from brain_brew.commands.run_recipe.top_level_builder import TopLevelBuilder +from brain_brew.configuration.yaml_verifier import YamlVerifier + + +@dataclass +class RunRecipe(Command): + recipe_file_name: str + verify_only: bool + + def execute(self): + # Parse Build Config File + YamlVerifier() + recipe = TopLevelBuilder.parse_and_read(self.recipe_file_name, self.verify_only) + + if not self.verify_only: + recipe.execute() diff --git a/brain_brew/configuration/build_config/top_level_builder.py b/brain_brew/commands/run_recipe/top_level_builder.py similarity index 78% rename from brain_brew/configuration/build_config/top_level_builder.py rename to 
brain_brew/commands/run_recipe/top_level_builder.py index c9b78ec..4650f54 100644 --- a/brain_brew/configuration/build_config/top_level_builder.py +++ b/brain_brew/commands/run_recipe/top_level_builder.py @@ -4,9 +4,11 @@ from brain_brew.build_tasks.crowd_anki.crowd_anki_generate import CrowdAnkiGenerate from brain_brew.build_tasks.csvs.csvs_generate import CsvsGenerate from brain_brew.build_tasks.csvs.generate_guids_in_csvs import GenerateGuidsInCsvs -from brain_brew.configuration.build_config.build_task import BuildTask, TopLevelBuildTask -from brain_brew.configuration.build_config.parts_builder import PartsBuilder -from brain_brew.configuration.build_config.recipe_builder import RecipeBuilder +from brain_brew.build_tasks.deck_parts.save_media_group_to_folder import SaveMediaGroupsToFolder +from brain_brew.build_tasks.deck_parts.save_note_models_to_folder import SaveNoteModelsToFolder +from brain_brew.commands.run_recipe.build_task import BuildTask, TopLevelBuildTask +from brain_brew.commands.run_recipe.parts_builder import PartsBuilder +from brain_brew.commands.run_recipe.recipe_builder import RecipeBuilder from brain_brew.interfaces.yamale_verifyable import YamlRepr @@ -69,8 +71,8 @@ def yamale_schema(cls) -> str: def from_repr(cls, data: dict): pass - def encode(self) -> dict: - pass + def encode(self) -> list: + return self.tasks_to_encoded() @classmethod def from_yaml_file(cls, filename: str): @@ -78,4 +80,8 @@ def from_yaml_file(cls, filename: str): @classmethod def yamale_dependencies(cls) -> Set[Type[TopLevelBuildTask]]: - return {CrowdAnkiGenerate, CsvsGenerate, PartsBuilder, GenerateGuidsInCsvs} + return { + PartsBuilder, + CrowdAnkiGenerate, CsvsGenerate, + GenerateGuidsInCsvs, SaveMediaGroupsToFolder, SaveNoteModelsToFolder + } diff --git a/brain_brew/configuration/anki_field.py b/brain_brew/configuration/anki_field.py index 54d147d..878e0ac 100644 --- a/brain_brew/configuration/anki_field.py +++ b/brain_brew/configuration/anki_field.py @@ -11,3 +11,6 
@@ def __init__(self, anki_name, name=None, default_value=None): def append_name_if_differs(self, dict_to_add_to: dict, value): if value != self.default_value: dict_to_add_to.setdefault(self.name, value) + + def does_differ(self, value): + return value != self.default_value diff --git a/brain_brew/configuration/argument_reader.py b/brain_brew/configuration/argument_reader.py deleted file mode 100644 index d30841b..0000000 --- a/brain_brew/configuration/argument_reader.py +++ /dev/null @@ -1,56 +0,0 @@ -import sys -from argparse import ArgumentParser - - -class BBArgumentReader(ArgumentParser): - def __init__(self): - super().__init__( - description='Manage Flashcards by Transforming them to various types' - ) - - self._set_parser_arguments() - - def _set_parser_arguments(self): - self.add_argument( - "recipe", - metavar="recipe", - type=str, - help="Yaml file to use as the recipe" - ) - self.add_argument( - "--config", "--global-config", "-c", - action="store", - dest="config_file", - default=None, - type=str, - help="Global config file to use" - ) - self.add_argument( - "--verify", "-v", - action="store_true", - dest="verify_only", - default=False, - help="Only verify the recipe contents, without running it." 
- ) - - def get_parsed(self, override_args=None): - parsed_args = self.parse_args(args=override_args) - - # Required - recipe = self.error_if_blank(parsed_args.recipe) - - # Optional - config_file = parsed_args.config_file - verify_only = parsed_args.verify_only - - return recipe, config_file, verify_only - - def error_if_blank(self, arg): - if arg == "" or arg is None: - self.error("Required argument missing") - return arg - - def error(self, message): - sys.stderr.write('error: %s\n' % message) - self.print_help() - sys.exit(2) diff --git a/brain_brew/configuration/global_config.py b/brain_brew/configuration/global_config.py deleted file mode 100644 index 7413df7..0000000 --- a/brain_brew/configuration/global_config.py +++ /dev/null @@ -1,48 +0,0 @@ -from dataclasses import dataclass, field -from typing import Union, Optional - -from brain_brew.configuration.representation_base import RepresentationBase -from brain_brew.representation.yaml.yaml_object import YamlObject - - -@dataclass -class GlobalConfig(YamlObject): - __instance = None - - def encode(self) -> dict: - pass - - @dataclass - class Representation(RepresentationBase): - sort_case_insensitive: Optional[bool] = field(default=False) - join_values_with: Optional[str] = field(default=" ") - - sort_case_insensitive: bool - join_values_with: str - - @classmethod - def from_repr(cls, data: Union[Representation, dict]): - rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) - return cls( - sort_case_insensitive=rep.sort_case_insensitive, - join_values_with=rep.join_values_with - ) - - def __post_init__(self): - if GlobalConfig.__instance is None: - GlobalConfig.__instance = self - else: - raise Exception("Multiple GlobalConfigs created") - - @classmethod - def from_yaml_file(cls, filename: str = "brain_brew_config.yaml") -> 'GlobalConfig': - return cls.from_repr(cls.read_to_dict(filename)) - - @classmethod - def get_instance(cls) -> 'GlobalConfig': - 
return cls.__instance - - @classmethod - def clear_instance(cls): - if cls.__instance: - cls.__instance = None diff --git a/brain_brew/configuration/representation_base.py b/brain_brew/configuration/representation_base.py index 570c7f8..22d5a64 100644 --- a/brain_brew/configuration/representation_base.py +++ b/brain_brew/configuration/representation_base.py @@ -4,3 +4,14 @@ class RepresentationBase: @classmethod def from_dict(cls, data: dict): return cls(**data) # noqa + + def encode(self): + return {key: value for key, value in self.__dict__.items() if self.encode_filter(key, value)} + + @classmethod + def encode_filter(cls, key, value): + if value is None: + return False + if not value: + return False + return True diff --git a/brain_brew/front_matter.py b/brain_brew/front_matter.py new file mode 100644 index 0000000..98f1b65 --- /dev/null +++ b/brain_brew/front_matter.py @@ -0,0 +1,2 @@ +def latest_version_number(): + return "0.3.4" diff --git a/brain_brew/interfaces/command.py b/brain_brew/interfaces/command.py new file mode 100644 index 0000000..3c25a23 --- /dev/null +++ b/brain_brew/interfaces/command.py @@ -0,0 +1,7 @@ +from abc import ABC, abstractmethod + + +class Command(ABC): + @abstractmethod + def execute(self): + pass diff --git a/brain_brew/main.py b/brain_brew/main.py index 9764ef8..5a36fd8 100644 --- a/brain_brew/main.py +++ b/brain_brew/main.py @@ -1,12 +1,9 @@ import logging -from brain_brew.configuration.argument_reader import BBArgumentReader -from brain_brew.configuration.build_config.top_level_builder import TopLevelBuilder -from brain_brew.configuration.file_manager import FileManager -from brain_brew.configuration.global_config import GlobalConfig +from brain_brew.commands.argument_reader import BBArgumentReader # sys.path.append(os.path.join(os.path.dirname(__file__), "dist")) # sys.path.append(os.path.dirname(__file__)) -from brain_brew.configuration.yaml_verifier import YamlVerifier +from brain_brew.configuration.file_manager import 
FileManager def main(): @@ -14,18 +11,12 @@ def main(): # Read in Arguments argument_reader = BBArgumentReader() - recipe_file_name, global_config_file, verify_only = argument_reader.get_parsed() + command = argument_reader.get_parsed() - # Read in Global Config File - GlobalConfig.from_yaml_file(global_config_file) if global_config_file else GlobalConfig.from_yaml_file() + # Create Singleton FileManager FileManager() - # Parse Build Config File - YamlVerifier() - recipe = TopLevelBuilder.parse_and_read(recipe_file_name, verify_only) - - if not verify_only: - recipe.execute() + command.execute() if __name__ == "__main__": diff --git a/brain_brew/representation/generic/csv_file.py b/brain_brew/representation/generic/csv_file.py index d2931d5..bbd2423 100644 --- a/brain_brew/representation/generic/csv_file.py +++ b/brain_brew/representation/generic/csv_file.py @@ -77,5 +77,12 @@ def to_filename_csv(filename: str) -> str: def formatted_file_location(cls, location): return cls.to_filename_csv(location) - def sort_data(self, sort_by_keys, reverse_sort): - self._data = sort_dict(self._data, sort_by_keys, reverse_sort) + def sort_data(self, sort_by_keys, reverse_sort, case_insensitive_sort): + self._data = sort_dict(self._data, sort_by_keys, reverse_sort, case_insensitive_sort) + + @classmethod + def create_file_with_headers(cls, filepath: str, headers: List[str]): + with open(filepath, mode='w+', newline='', encoding=_encoding) as csv_file: + csv_writer = csv.DictWriter(csv_file, fieldnames=headers, lineterminator='\n') + + csv_writer.writeheader() diff --git a/brain_brew/representation/generic/html_file.py b/brain_brew/representation/generic/html_file.py index c1682cf..283ca41 100644 --- a/brain_brew/representation/generic/html_file.py +++ b/brain_brew/representation/generic/html_file.py @@ -24,9 +24,14 @@ def read_file(self): def get_data(self, deep_copy=False) -> str: return self.get_deep_copy(self._data) if deep_copy else self._data + @staticmethod + def 
write_file(file_location, data): + with open(file_location, "w+") as file: + file.write(data) + @staticmethod def to_filename_html(filename: str) -> str: - return filename + ".csv" if not filename.endswith(".csv") else filename + return filename + ".html" if not filename.endswith(".html") else filename @classmethod def formatted_file_location(cls, location): diff --git a/brain_brew/representation/json/crowd_anki_export.py b/brain_brew/representation/json/crowd_anki_export.py index e4e4099..5652049 100644 --- a/brain_brew/representation/json/crowd_anki_export.py +++ b/brain_brew/representation/json/crowd_anki_export.py @@ -44,7 +44,7 @@ def find_json_file_in_folder(self): if len(files) == 1: return files[0] elif not files: - file_loc = self.folder_location + self.folder_location.split("/")[-2] + ".json" + file_loc = self.folder_location + "deck.json" logging.warning(f"Creating missing json file '{file_loc}'") return file_loc else: @@ -60,4 +60,4 @@ def _read_json_file(self): self.note_models = list(map(NoteModel.from_crowdanki, self.json_data.note_models)) else: self.write_to_files({}) - self.json_data = {} + self.json_data = CrowdAnkiJsonWrapper({}) diff --git a/brain_brew/representation/json/json_file.py b/brain_brew/representation/json/json_file.py index 2fb0540..c1ccfba 100644 --- a/brain_brew/representation/json/json_file.py +++ b/brain_brew/representation/json/json_file.py @@ -8,12 +8,18 @@ class JsonFile: def pretty_print(data): return json.dumps(data, indent=4) + @staticmethod + def to_filename_json(filename: str): + if filename[-5:] != ".json": + return filename + ".json" + return filename + @staticmethod def read_file(file_location): - with open(file_location, "r", encoding=_encoding) as read_file: + with open(JsonFile.to_filename_json(file_location), "r", encoding=_encoding) as read_file: return json.load(read_file) @staticmethod def write_file(file_location, data): - with open(file_location, "w+", encoding=_encoding) as write_file: + with 
open(JsonFile.to_filename_json(file_location), "w+", encoding=_encoding) as write_file: json.dump(data, write_file, indent=4, sort_keys=False, ensure_ascii=False) diff --git a/brain_brew/representation/json/wrappers_for_crowd_anki.py b/brain_brew/representation/json/wrappers_for_crowd_anki.py index bac4b58..66beb86 100644 --- a/brain_brew/representation/json/wrappers_for_crowd_anki.py +++ b/brain_brew/representation/json/wrappers_for_crowd_anki.py @@ -28,7 +28,8 @@ def children(self) -> list: @property def note_models(self) -> list: - return self.data.get(CA_NOTE_MODELS, []) + return CrowdAnkiJsonWrapper.get_from_self_and_children_recursively(self.data, [], CA_NOTE_MODELS) + @note_models.setter def note_models(self, value: list): @@ -36,7 +37,7 @@ def note_models(self, value: list): @property def notes(self) -> list: - return self.data.get(CA_NOTES, []) + return CrowdAnkiJsonWrapper.get_from_self_and_children_recursively(self.data, [], CA_NOTES) @notes.setter def notes(self, value: list): @@ -44,7 +45,7 @@ def notes(self, value: list): @property def media_files(self) -> list: - return self.data.get(CA_MEDIA_FILES, []) + return CrowdAnkiJsonWrapper.get_from_self_and_children_recursively(self.data, [], CA_MEDIA_FILES) @media_files.setter def media_files(self, value: list): @@ -58,6 +59,15 @@ def name(self) -> list: def name(self, value: list): self.data[CA_NAME] = value + @staticmethod + def get_from_self_and_children_recursively(data: dict, running_data: list, key_name: str): + running_data += data.get(key_name, []) + children = data.get(CA_CHILDREN, []) + if isinstance(children, list): + for child in children: + running_data = CrowdAnkiJsonWrapper.get_from_self_and_children_recursively(child, running_data, key_name) + return running_data + class CrowdAnkiNoteWrapper: data: dict diff --git a/brain_brew/representation/yaml/note_model.py b/brain_brew/representation/yaml/note_model.py index 11f7bdd..74c5ee9 100644 --- a/brain_brew/representation/yaml/note_model.py +++ 
b/brain_brew/representation/yaml/note_model.py @@ -5,6 +5,8 @@ from brain_brew.configuration.anki_field import AnkiField from brain_brew.configuration.representation_base import RepresentationBase from brain_brew.interfaces.media_container import MediaContainer +from brain_brew.interfaces.yamale_verifyable import YamlRepr +from brain_brew.representation.generic.html_file import HTMLFile from brain_brew.representation.yaml.note_model_field import Field from brain_brew.representation.yaml.note_model_template import Template from brain_brew.representation.yaml.yaml_object import YamlObject @@ -25,7 +27,8 @@ "amssymb,amsmath}\n\\pagestyle{empty}\n\\setlength{\\parindent}{0in}\n\\begin{" "document}\n") LATEX_POST = AnkiField("latexPost", "latex_post", default_value="\\end{document}") -REQUIRED_FIELDS_PER_TEMPLATE = AnkiField("req", "required_fields_per_template") +LATEX_SVG = AnkiField("latexsvg", "latex_svg", default_value=False) +REQUIRED_FIELDS_PER_TEMPLATE = AnkiField("req", "required_fields_per_template", default_value=[]) FIELDS = AnkiField("flds", "fields") TEMPLATES = AnkiField("tmpls", "templates") TAGS = AnkiField("tags", default_value=[]) @@ -48,33 +51,100 @@ DECK_OVERRIDE_ID = AnkiField("did", "deck_override_id", default_value=None) +CSS_FILE = AnkiField("css_file") + + @dataclass -class NoteModel(YamlObject, MediaContainer, RepresentationBase): +class NoteModel(YamlObject, YamlRepr, MediaContainer): + @classmethod + def task_name(cls) -> str: + return r"note_model_from_yaml_repr_inner" + + @classmethod + def yamale_schema(cls) -> str: + return f"""\ + {NAME.name}: str() + {CROWDANKI_ID.name}: str() + {CSS_FILE.name}: str() + {FIELDS.name}: include({Field.task_name()}, required=False) + {TEMPLATES.name}: include({Template.task_name()}, required=False) + {REQUIRED_FIELDS_PER_TEMPLATE.name}: list(required=False) + {LATEX_POST.name}: str(required=False) + {LATEX_PRE.name}: str(required=False) + {SORT_FIELD_NUM.name}: int(required=False) + {IS_CLOZE.name}: 
bool(required=False) + {CROWDANKI_TYPE.name}: str(required=False) + {TAGS.name}: str(required=False) + {VERSION.name}: list(required=False) + """ + + @classmethod + def yamale_dependencies(cls) -> set: + return {Field, Template} + + @dataclass + class Representation(RepresentationBase): + name: str + id: str + css_file: str + fields: List[dict] + templates: List[dict] + + required_fields_per_template: List[list] = field(default_factory=lambda: []) + latex_post: str = field(default=LATEX_POST.default_value) + latex_pre: str = field(default=LATEX_PRE.default_value) + latex_svg: bool = field(default=LATEX_SVG.default_value) + sort_field_num: int = field(default=SORT_FIELD_NUM.default_value) + is_cloze: bool = field(default=IS_CLOZE.default_value) + crowdanki_type: str = field(default=CROWDANKI_TYPE.default_value) # Should always be "NoteModel" + tags: List[str] = field(default_factory=lambda: TAGS.default_value) # Tags of the last added note + version: list = field(default_factory=lambda: VERSION.default_value) # Legacy version number. 
Deprecated in Anki + + @classmethod + def from_repr(cls, data: Union[Representation, dict]): + rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) + return cls( + rep=rep, + fields=[Field.from_repr(f) for f in rep.fields], + templates=[Template.from_html_files(t) for t in rep.templates], + css=HTMLFile.create_or_get(rep.css_file).get_data(deep_copy=False), + + name=rep.name, is_cloze=bool(rep.is_cloze), + latex_pre=rep.latex_pre, latex_post=rep.latex_post, latex_svg=rep.latex_svg, + required_fields_per_template=rep.required_fields_per_template, + tags=rep.tags, sort_field_num=rep.sort_field_num, version=rep.version, + id=rep.id, crowdanki_type=rep.crowdanki_type + ) + @dataclass class CrowdAnki(RepresentationBase): name: str crowdanki_uuid: str css: str - req: List[list] flds: List[dict] tmpls: List[dict] + req: List[list] = field(default_factory=lambda: REQUIRED_FIELDS_PER_TEMPLATE.default_value) latexPre: str = field(default=LATEX_PRE.default_value) latexPost: str = field(default=LATEX_POST.default_value) + latexsvg: bool = field(default=LATEX_SVG.default_value) # TODO: Fix lowercase here in CrowdAnki __type__: str = field(default=CROWDANKI_TYPE.default_value) tags: List[str] = field(default_factory=lambda: TAGS.default_value) sortf: int = field(default=SORT_FIELD_NUM.default_value) type: int = field(default=0) # Is_Cloze Manually set to 0 vers: list = field(default_factory=lambda: VERSION.default_value) + rep: Union[Representation, CrowdAnki] + name: str id: str css: str - required_fields_per_template: List[list] fields: List[Field] templates: List[Template] + required_fields_per_template: List[list] = field(default_factory=lambda: REQUIRED_FIELDS_PER_TEMPLATE.default_value) latex_post: str = field(default=LATEX_POST.default_value) latex_pre: str = field(default=LATEX_PRE.default_value) + latex_svg: bool = field(default=LATEX_SVG.default_value) sort_field_num: int = 
field(default=SORT_FIELD_NUM.default_value) is_cloze: bool = field(default=IS_CLOZE.default_value) crowdanki_type: str = field(default=CROWDANKI_TYPE.default_value) # Should always be "NoteModel" @@ -84,20 +154,17 @@ class CrowdAnki(RepresentationBase): @classmethod def from_yaml_file(cls, filename: str): data = cls.read_to_dict(filename) - return cls( - fields=[Field.from_dict(f) for f in data.pop(FIELDS.name)], - templates=[Template.from_dict(t) for t in data.pop(TEMPLATES.name)], - **data - ) + return cls.from_repr(data) @classmethod def from_crowdanki(cls, data: Union[CrowdAnki, dict]): # TODO: field_whitelist, note_model_whitelist ca: cls.CrowdAnki = data if isinstance(data, cls.CrowdAnki) else cls.CrowdAnki.from_dict(data) return cls( + rep=ca, fields=[Field.from_crowd_anki(f) for f in ca.flds], templates=[Template.from_crowdanki(t) for t in ca.tmpls], is_cloze=bool(ca.type), - name=ca.name, css=ca.css, latex_pre=ca.latexPre, latex_post=ca.latexPost, + name=ca.name, css=ca.css, latex_pre=ca.latexPre, latex_post=ca.latexPost, latex_svg=ca.latexsvg, required_fields_per_template=ca.req, tags=ca.tags, sort_field_num=ca.sortf, version=ca.vers, id=ca.crowdanki_uuid, crowdanki_type=ca.__type__ ) @@ -110,6 +177,7 @@ def encode_as_crowdanki(self) -> dict: REQUIRED_FIELDS_PER_TEMPLATE.anki_name: self.required_fields_per_template, LATEX_PRE.anki_name: self.latex_pre, LATEX_POST.anki_name: self.latex_post, + LATEX_SVG.anki_name: self.latex_svg, SORT_FIELD_NUM.anki_name: self.sort_field_num, CROWDANKI_TYPE.anki_name: self.crowdanki_type, TAGS.anki_name: self.tags, @@ -122,6 +190,30 @@ def encode_as_crowdanki(self) -> dict: return OrderedDict(sorted(data_dict.items())) + def encode_as_part_with_empty_file_references(self) -> dict: + data_dict: Dict[str, Union[str, list]] = { + NAME.name: self.name, + CROWDANKI_ID.name: self.id, + CSS_FILE.name: "" + } + + SORT_FIELD_NUM.append_name_if_differs(data_dict, self.sort_field_num) + IS_CLOZE.append_name_if_differs(data_dict, 
self.is_cloze) + LATEX_PRE.append_name_if_differs(data_dict, self.latex_pre) + LATEX_POST.append_name_if_differs(data_dict, self.latex_post) + LATEX_SVG.append_name_if_differs(data_dict, self.latex_svg) + + data_dict.setdefault(FIELDS.name, [f.encode_as_part() for f in self.fields]) + data_dict.setdefault(TEMPLATES.name, [t.encode_as_part() for t in self.templates]) + + # Useless + TAGS.append_name_if_differs(data_dict, self.tags) + VERSION.append_name_if_differs(data_dict, self.version) + CROWDANKI_TYPE.append_name_if_differs(data_dict, self.crowdanki_type) + REQUIRED_FIELDS_PER_TEMPLATE.append_name_if_differs(data_dict, self.required_fields_per_template) + + return data_dict + def encode(self) -> dict: data_dict: Dict[str, Union[str, list]] = { NAME.name: self.name, @@ -133,6 +225,7 @@ def encode(self) -> dict: IS_CLOZE.append_name_if_differs(data_dict, self.is_cloze) LATEX_PRE.append_name_if_differs(data_dict, self.latex_pre) LATEX_POST.append_name_if_differs(data_dict, self.latex_post) + LATEX_SVG.append_name_if_differs(data_dict, self.latex_svg) data_dict.setdefault(FIELDS.name, [f.encode_as_part() for f in self.fields]) data_dict.setdefault(TEMPLATES.name, [t.encode() for t in self.templates]) @@ -171,7 +264,7 @@ def check_field_extra(self, fields_to_check: List[str]): def zip_field_to_data(self, data: List[str]) -> dict: if len(self.fields) != len(data): raise Exception( - f"Data of length {len(data)} cannot map to fields of length {len(self.field_names_lowercase)}") + f"Data of length {len(data)} cannot map to fields of length {len(self.field_names_lowercase)}", data, self.field_names_lowercase) return dict(zip(self.field_names_lowercase, data)) diff --git a/brain_brew/representation/yaml/note_model_field.py b/brain_brew/representation/yaml/note_model_field.py index 10e7a5e..eebf277 100644 --- a/brain_brew/representation/yaml/note_model_field.py +++ b/brain_brew/representation/yaml/note_model_field.py @@ -32,7 +32,7 @@ def yamale_schema(cls) -> str: 
@classmethod def from_repr(cls, data: dict): - pass + return cls.from_dict(data) @dataclass class CrowdAnki(RepresentationBase): diff --git a/brain_brew/representation/yaml/note_model_template.py b/brain_brew/representation/yaml/note_model_template.py index e99f882..15c472a 100644 --- a/brain_brew/representation/yaml/note_model_template.py +++ b/brain_brew/representation/yaml/note_model_template.py @@ -1,10 +1,13 @@ +import os from dataclasses import dataclass, field from typing import Optional, Union, Set from brain_brew.configuration.anki_field import AnkiField from brain_brew.configuration.representation_base import RepresentationBase +from brain_brew.interfaces.yamale_verifyable import YamlRepr +from brain_brew.representation.generic.html_file import HTMLFile from brain_brew.representation.yaml.yaml_object import YamlObject -from brain_brew.utils import find_media_in_field +from brain_brew.utils import find_media_in_field, split_by_regex NAME = AnkiField("name") ORDINAL = AnkiField("ord", "ordinal") @@ -13,10 +16,46 @@ BROWSER_ANSWER_FORMAT = AnkiField("bafmt", "browser_answer_format", default_value="") BROWSER_QUESTION_FORMAT = AnkiField("bqfmt", "browser_question_format", default_value="") DECK_OVERRIDE_ID = AnkiField("did", "deck_override_id", default_value=None) +BROWSER_FONT = AnkiField("bfont", "browser_font", default_value="") +BROWSER_FONT_SIZE = AnkiField("bsize", "browser_font_size", default_value=0) +SCRATCH_PAD = AnkiField("scratchPad", "scratch_pad", default_value=0) + +HTML_FILE = AnkiField("html_file") +BROWSER_HTML_FILE = AnkiField("browser_html_file", default_value=None) + +html_separator_regex = r'[\n]{1,}[-]{1,}[\n]{1,}' @dataclass -class Template(RepresentationBase, YamlObject): +class Template(RepresentationBase, YamlObject, YamlRepr): + @classmethod + def task_name(cls) -> str: + return r'note_model_template_from_html' + + @classmethod + def yamale_schema(cls) -> str: + return f"""\ + name: str() + html_file: str() + browser_html_file: 
str(required=False) + deck_override_id: int(required=False) + """ + + @dataclass + class HTML(RepresentationBase): + name: str + html_file: str + browser_html_file: Optional[str] = field(default=None) + browser_font: str = field(default=BROWSER_FONT.default_value) + browser_font_size: int = field(default=BROWSER_FONT_SIZE.default_value) + deck_override_id: Optional[int] = field(default=DECK_OVERRIDE_ID.default_value) + scratch_pad: int = field(default=SCRATCH_PAD.default_value) + + @classmethod + def from_repr(cls, data: Union[HTML, dict]): + rep: cls.HTML = data if isinstance(data, cls.HTML) else cls.HTML.from_dict(data) + return cls.from_html_files(rep) + @classmethod def from_yaml_file(cls, filename: str) -> 'Template': return cls.from_dict(cls.read_to_dict(filename)) @@ -28,41 +67,96 @@ class CrowdAnki(RepresentationBase): afmt: str bqfmt: str = field(default=BROWSER_QUESTION_FORMAT.default_value) bafmt: str = field(default=BROWSER_ANSWER_FORMAT.default_value) + bfont: str = field(default=BROWSER_FONT.default_value) + bsize: int = field(default=BROWSER_FONT_SIZE.default_value) ord: int = field(default=None) did: Optional[int] = field(default=None) + scratchPad: int = field(default=SCRATCH_PAD.default_value) name: str question_format: str answer_format: str question_format_in_browser: str = field(default=BROWSER_QUESTION_FORMAT.default_value) answer_format_in_browser: str = field(default=BROWSER_ANSWER_FORMAT.default_value) + browser_font: str = field(default=BROWSER_FONT.default_value) + browser_font_size: int = field(default=BROWSER_FONT_SIZE.default_value) deck_override_id: Optional[int] = field(default=DECK_OVERRIDE_ID.default_value) + scratch_pad: int = field(default=SCRATCH_PAD.default_value) + + html_file: Optional[str] = field(default="") + browser_html_file: Optional[str] = field(default="") + + @classmethod + def from_html_files(cls, data: Union[HTML, dict]): + html_rep: cls.HTML = data if isinstance(data, cls.HTML) else cls.HTML.from_dict(data) + + 
html_file = HTMLFile.create_or_get(html_rep.html_file) + browser_html_file = HTMLFile.create_or_get(html_rep.browser_html_file) if html_rep.browser_html_file else None + + main_data = html_file.get_data(deep_copy=True) + browser_data = browser_html_file.get_data(deep_copy=True) if browser_html_file else None + + def split_template(the_data, file): + split = split_by_regex(the_data, html_separator_regex) + if len(split) != 2: + raise ValueError((f"Cannot find" if len(split) < 2 else "More than one") + f" separator '---' in html file '{file.file_location}'") + return split[0], split[1] + + front, back = split_template(main_data, html_file) + browser_front, browser_back = split_template(browser_data, browser_html_file) if browser_data else ("", "") + + return cls( + name=html_rep.name, + question_format=front, + answer_format=back, + question_format_in_browser=browser_front, + answer_format_in_browser=browser_back, + deck_override_id=html_rep.deck_override_id, + html_file=html_rep.html_file, + browser_html_file=html_rep.browser_html_file, + browser_font=html_rep.browser_font, + browser_font_size=html_rep.browser_font_size, + scratch_pad=html_rep.scratch_pad, + ) @classmethod def from_crowdanki(cls, data: Union[CrowdAnki, dict]): ca: cls.CrowdAnki = data if isinstance(data, cls.CrowdAnki) else cls.CrowdAnki.from_dict(data) return cls( name=ca.name, question_format=ca.qfmt, answer_format=ca.afmt, - question_format_in_browser=ca.bqfmt, answer_format_in_browser=ca.bafmt, deck_override_id=ca.did + question_format_in_browser=ca.bqfmt, answer_format_in_browser=ca.bafmt, + deck_override_id=ca.did, browser_font=ca.bfont, browser_font_size=ca.bsize, scratch_pad=ca.scratchPad, ) - def get_all_media_references(self) -> Set[str]: - all_media = set()\ - .union(find_media_in_field(self.question_format))\ - .union(find_media_in_field(self.answer_format))\ - .union(find_media_in_field(self.question_format_in_browser))\ - .union(find_media_in_field(self.answer_format_in_browser)) -
return all_media + def encode_as_part(self): + data_dict = { + NAME.name: self.name, + HTML_FILE.name: "" + } + + if self.has_browser_template(): + data_dict.setdefault(BROWSER_HTML_FILE.name, "") + + DECK_OVERRIDE_ID.append_name_if_differs(data_dict, self.deck_override_id) + BROWSER_FONT.append_name_if_differs(data_dict, self.browser_font) + BROWSER_FONT_SIZE.append_name_if_differs(data_dict, self.browser_font_size) + SCRATCH_PAD.append_name_if_differs(data_dict, self.scratch_pad) + + return data_dict def encode_as_crowdanki(self, ordinal: int) -> dict: data_dict = { ANSWER_FORMAT.anki_name: self.answer_format, BROWSER_ANSWER_FORMAT.anki_name: self.answer_format_in_browser, + BROWSER_FONT.anki_name: self.browser_font, BROWSER_QUESTION_FORMAT.anki_name: self.question_format_in_browser, + BROWSER_FONT_SIZE.anki_name: self.browser_font_size, DECK_OVERRIDE_ID.anki_name: self.deck_override_id, NAME.anki_name: self.name, ORDINAL.anki_name: ordinal, QUESTION_FORMAT.anki_name: self.question_format, + SCRATCH_PAD.anki_name: self.scratch_pad, } return data_dict @@ -77,5 +171,26 @@ def encode(self) -> dict: BROWSER_QUESTION_FORMAT.append_name_if_differs(data_dict, self.question_format_in_browser) BROWSER_ANSWER_FORMAT.append_name_if_differs(data_dict, self.answer_format_in_browser) DECK_OVERRIDE_ID.append_name_if_differs(data_dict, self.deck_override_id) + BROWSER_FONT.append_name_if_differs(data_dict, self.browser_font) + BROWSER_FONT_SIZE.append_name_if_differs(data_dict, self.browser_font_size) + SCRATCH_PAD.append_name_if_differs(data_dict, self.scratch_pad) return data_dict + + def get_all_media_references(self) -> Set[str]: + all_media = set() \ + .union(find_media_in_field(self.question_format)) \ + .union(find_media_in_field(self.answer_format)) \ + .union(find_media_in_field(self.question_format_in_browser)) \ + .union(find_media_in_field(self.answer_format_in_browser)) + return all_media + + def has_browser_template(self): + return 
BROWSER_QUESTION_FORMAT.does_differ(self.question_format_in_browser) \ + or BROWSER_ANSWER_FORMAT.does_differ(self.answer_format_in_browser) + + def get_template_files_data(self): + template = f"{self.question_format}\n\n--\n\n{self.answer_format}" + browser_template = f"{self.question_format_in_browser}\n\n--\n\n{self.answer_format_in_browser}" if self.has_browser_template() else None + + return template, browser_template diff --git a/brain_brew/representation/yaml/notes.py b/brain_brew/representation/yaml/notes.py index 5d70a5f..ae90dcd 100644 --- a/brain_brew/representation/yaml/notes.py +++ b/brain_brew/representation/yaml/notes.py @@ -3,7 +3,6 @@ from dataclasses import dataclass from typing import List, Optional, Dict, Set -from brain_brew.configuration.global_config import GlobalConfig from brain_brew.interfaces.media_container import MediaContainer from brain_brew.representation.yaml.yaml_object import YamlObject from brain_brew.utils import find_media_in_field @@ -105,10 +104,7 @@ def get_all_media_references(self) -> Set[str]: all_media = all_media.union(media) return all_media - def get_sorted_notes(self, sort_by_keys, reverse_sort, case_insensitive_sort=None): - if case_insensitive_sort is None: - case_insensitive_sort = GlobalConfig.get_instance().sort_case_insensitive - + def get_sorted_notes(self, sort_by_keys, reverse_sort, case_insensitive_sort): if sort_by_keys: def sort_method(i: Note): def get_sort_tuple(attr_or_field): @@ -132,7 +128,7 @@ def get_sort_tuple(attr_or_field): return self.notes - def get_all_notes_copy(self, sort_by_keys, reverse_sort, case_insensitive_sort=None) -> List[Note]: + def get_all_notes_copy(self, sort_by_keys, reverse_sort, case_insensitive_sort) -> List[Note]: def join_tags(n_tags): if self.tags is None and n_tags is None: return [] @@ -183,6 +179,6 @@ def get_all_media_references(self) -> Set[str]: all_media = all_media.union(media) return all_media - def get_sorted_notes_copy(self, sort_by_keys, reverse_sort, case_insensitive_sort=None):
+ def get_sorted_notes_copy(self, sort_by_keys, reverse_sort, case_insensitive_sort): return [note for group in self.note_groupings for note in group.get_all_notes_copy(sort_by_keys, reverse_sort, case_insensitive_sort)] diff --git a/brain_brew/representation/yaml/yaml_object.py b/brain_brew/representation/yaml/yaml_object.py index ca9e2e2..3a05198 100644 --- a/brain_brew/representation/yaml/yaml_object.py +++ b/brain_brew/representation/yaml/yaml_object.py @@ -18,7 +18,7 @@ class YamlObject(ABC): @staticmethod def read_to_dict(filename: str): - filename = YamlObject.append_yaml_if_needed(filename) + filename = YamlObject.to_filename_yaml(filename) if not Path(filename).is_file(): raise FileNotFoundError(filename) @@ -27,7 +27,7 @@ def read_to_dict(filename: str): return yaml_load.load(file) @staticmethod - def append_yaml_if_needed(filename: str): + def to_filename_yaml(filename: str): if filename[-5:] != ".yaml" and filename[-4:] != ".yml": return filename + ".yaml" return filename @@ -42,10 +42,14 @@ def from_yaml_file(cls, filename: str) -> 'YamlObject': pass def dump_to_yaml(self, filepath): - filepath = YamlObject.append_yaml_if_needed(filepath) + self.dump_to_yaml_file(filepath, self.encode()) + + @classmethod + def dump_to_yaml_file(cls, filepath, data): + filepath = YamlObject.to_filename_yaml(filepath) create_path_if_not_exists(filepath) with open(filepath, 'w') as fp: - yaml_dump.dump(self.encode(), fp) + yaml_dump.dump(data, fp) diff --git a/brain_brew/schemas/recipe.yaml b/brain_brew/schemas/recipe.yaml index 4e6b9db..52f809c 100644 --- a/brain_brew/schemas/recipe.yaml +++ b/brain_brew/schemas/recipe.yaml @@ -2,7 +2,9 @@ list( map(include('build_parts'), key=regex('build_part[s]?', ignore_case=True)), map(any(include('generate_crowd_anki'), list(include('generate_crowd_anki'))), key=regex('generate_crowd_anki', ignore_case=True)), map(any(include('generate_csvs'), list(include('generate_csvs'))), key=regex('generate_csv[s]?', ignore_case=True)), - 
map(any(include('generate_guids_in_csvs'), list(include('generate_guids_in_csvs'))), key=regex('generate_guids_in_csv[s]?', ignore_case=True)) + map(any(include('generate_guids_in_csvs'), list(include('generate_guids_in_csvs'))), key=regex('generate_guids_in_csv[s]?', ignore_case=True)), + map(any(include('save_media_groups_to_folder'), list(include('save_media_groups_to_folder'))), key=regex('save_media_group[s]?_to_folder', ignore_case=True)), + map(any(include('save_note_models_to_folder'), list(include('save_note_models_to_folder'))), key=regex('save_note_models[s]?_to_folder', ignore_case=True)) ) @@ -15,14 +17,13 @@ build_parts: map(any(include('media_group_from_crowd_anki'), list(include('media_group_from_crowd_anki'))), key=regex('media_group_from_crowd_anki', ignore_case=True)), map(any(include('media_group_from_folder'), list(include('media_group_from_folder'))), key=regex('media_group_from_folder', ignore_case=True)), map(any(include('media_group_from_yaml_part'), list(include('media_group_from_yaml_part'))), key=regex('media_group_from_yaml_part', ignore_case=True)), + map(any(include('note_model_from_crowd_anki'), list(include('note_model_from_crowd_anki'))), key=regex('note_model_from_crowd_anki', ignore_case=True)), map(any(include('note_model_from_html_parts'), list(include('note_model_from_html_parts'))), key=regex('note_model_from_html_parts', ignore_case=True)), - map(any(include('note_model_template_from_html'), list(include('note_model_template_from_html'))), key=regex('note_model_template[s]?_from_html', ignore_case=True)), - map(any(include('note_models_from_crowd_anki'), list(include('note_models_from_crowd_anki'))), key=regex('note_model[s]?_from_crowd_anki', ignore_case=True)), + map(any(include('note_models_all_from_crowd_anki'), list(include('note_models_all_from_crowd_anki'))), key=regex('note_models_all_from_crowd_anki', ignore_case=True)), map(any(include('note_models_from_yaml_part'), list(include('note_models_from_yaml_part'))), 
key=regex('note_model[s]?_from_yaml_part', ignore_case=True)), map(any(include('notes_from_crowd_anki'), list(include('notes_from_crowd_anki'))), key=regex('notes_from_crowd_anki', ignore_case=True)), map(any(include('notes_from_csvs'), list(include('notes_from_csvs'))), key=regex('notes_from_csv[s]?', ignore_case=True)), - map(any(include('notes_from_yaml_part'), list(include('notes_from_yaml_part'))), key=regex('notes_from_yaml_part', ignore_case=True)), - map(any(include('save_media_groups_to_folder'), list(include('save_media_groups_to_folder'))), key=regex('save_media_group[s]?_to_folder', ignore_case=True)) + map(any(include('notes_from_yaml_part'), list(include('notes_from_yaml_part'))), key=regex('notes_from_yaml_part', ignore_case=True)) ) generate_crowd_anki: @@ -41,6 +42,17 @@ generate_guids_in_csvs: source: any(str(), list(str())) columns: any(str(), list(str())) +save_media_groups_to_folder: + parts: list(str()) + folder: str() + clear_folder: bool(required=False) + recursive: bool(required=False) + +save_note_models_to_folder: + parts: list(str()) + folder: str() + clear_existing: bool(required=False) + --- @@ -49,11 +61,12 @@ file_mapping: note_model: str(required=False) sort_by_columns: list(str(), required=False) reverse_sort: bool(required=False) + case_insensitive_sort: bool(required=False) derivatives: list(include('file_mapping'), required=False) headers_from_crowd_anki: - source: str() part_id: str() + source: str() save_to_file: str(required=False) headers_from_yaml_part: @@ -66,16 +79,16 @@ headers_override: media_group_from_crowd_anki: part_id: str() - save_to_file: str(required=False) source: str() + save_to_file: str(required=False) recursive: bool(required=False) filter_whitelist_from_parts: list(str(), required=False) filter_blacklist_from_parts: list(str(), required=False) media_group_from_folder: part_id: str() - save_to_file: str(required=False) source: str() + save_to_file: str(required=False) recursive: bool(required=False) 
filter_whitelist_from_parts: list(str(), required=False) filter_blacklist_from_parts: list(str(), required=False) @@ -94,6 +107,12 @@ note_model_field: is_sticky: bool(required=False) is_right_to_left: bool(required=False) +note_model_from_crowd_anki: + part_id: str() + source: str() + model_name: str(required=False) + save_to_file: str(required=False) + note_model_from_html_parts: part_id: str() model_id: str() @@ -105,22 +124,11 @@ note_model_from_html_parts: note_model_mapping: note_models: any(list(str()), str()) - columns_to_fields: map(str(), key=str()) - personal_fields: list(str()) + columns_to_fields: map(str(), key=str(), required=False) + personal_fields: list(str(), required=False) -note_model_template_from_html: - part_id: str() - html_file: str() - template_name: str(required=False) - browser_html_file: str(required=False) - deck_override_id: int(required=False) - save_to_file: str(required=False) - -note_models_from_crowd_anki: +note_models_all_from_crowd_anki: source: str() - part_id: str() - model_name: str(required=False) - save_to_file: str(required=False) note_models_from_yaml_part: part_id: str() @@ -133,8 +141,8 @@ note_models_to_crowd_anki_item: part_id: str() notes_from_crowd_anki: - source: str() part_id: str() + source: str() sort_order: list(str(), required=False) save_to_file: str(required=False) reverse_sort: str(required=False) @@ -158,9 +166,4 @@ notes_to_crowd_anki: reverse_sort: bool(required=False) additional_items_to_add: map(str(), key=str(), required=False) override: include('notes_override', required=False) - -save_media_groups_to_folder: - parts: list(str()) - folder: str() - clear_folder: bool(required=False) - recursive: bool(required=False) + case_insensitive_sort: bool(required=False) diff --git a/brain_brew/transformers/media_group_from_location.py b/brain_brew/transformers/create_media_group_from_location.py similarity index 100% rename from brain_brew/transformers/media_group_from_location.py rename to 
brain_brew/transformers/create_media_group_from_location.py diff --git a/brain_brew/transformers/file_mapping.py b/brain_brew/transformers/file_mapping.py index ef9c8ad..ac89c87 100644 --- a/brain_brew/transformers/file_mapping.py +++ b/brain_brew/transformers/file_mapping.py @@ -5,7 +5,7 @@ from brain_brew.configuration.representation_base import RepresentationBase from brain_brew.interfaces.yamale_verifyable import YamlRepr from brain_brew.representation.generic.csv_file import CsvFile, CsvKeys -from brain_brew.utils import single_item_to_list, generate_anki_guid +from brain_brew.utils import single_item_to_list FILE = "csv_file" NOTE_MODEL = "note_model" @@ -27,6 +27,7 @@ def yamale_schema(cls) -> str: note_model: str(required=False) sort_by_columns: list(str(), required=False) reverse_sort: bool(required=False) + case_insensitive_sort: bool(required=False) derivatives: list(include('{cls.task_name()}'), required=False) ''' @@ -38,34 +39,40 @@ class Representation(RepresentationBase): reverse_sort: Optional[bool] derivatives: Optional[List['FileMappingDerivative.Representation']] - def __init__(self, file, note_model=None, sort_by_columns=None, reverse_sort=None, derivatives=None): + def __init__(self, file, note_model=None, sort_by_columns=None, reverse_sort=None, case_insensitive_sort=None, derivatives=None): self.file = file self.note_model = note_model self.sort_by_columns = sort_by_columns self.reverse_sort = reverse_sort + self.case_insensitive_sort = case_insensitive_sort self.derivatives = list(map(FileMappingDerivative.Representation.from_dict, derivatives)) \ if derivatives is not None else [] - compiled_data: Dict[str, dict] = field(init=False) - - csv_file: CsvFile - - note_model: Optional[str] - sort_by_columns: Optional[list] - reverse_sort: Optional[bool] - derivatives: Optional[List['FileMappingDerivative']] - @classmethod def from_repr(cls, data: Union[Representation, dict]): rep: cls.Representation = data if isinstance(data, cls.Representation) 
else cls.Representation.from_dict(data) return cls( + rep=rep, csv_file=CsvFile.create_or_get(rep.file), note_model=rep.note_model.strip() if rep.note_model else None, sort_by_columns=single_item_to_list(rep.sort_by_columns), reverse_sort=rep.reverse_sort or False, + case_insensitive_sort=rep.case_insensitive_sort or True, derivatives=list(map(cls.from_repr, rep.derivatives)) if rep.derivatives is not None else [] ) + rep: Representation + + compiled_data: Dict[str, dict] = field(init=False) + + csv_file: CsvFile + + note_model: Optional[str] + sort_by_columns: list + reverse_sort: bool + case_insensitive_sort: bool + derivatives: Optional[List['FileMappingDerivative']] + def get_available_columns(self): return self.csv_file.column_headers + [col for der in self.derivatives for col in der.get_available_columns()] @@ -114,7 +121,7 @@ def _build_data_recursive(self) -> List[dict]: def write_to_csv(self, data_to_set): self.csv_file.set_data_from_superset(data_to_set) - self.csv_file.sort_data(self.sort_by_columns, self.reverse_sort) + self.csv_file.sort_data(self.sort_by_columns, self.reverse_sort, self.case_insensitive_sort) self.csv_file.write_file() for der in self.derivatives: @@ -123,7 +130,7 @@ def write_to_csv(self, data_to_set): @dataclass class FileMapping(FileMappingDerivative): - note_model: str # Override Optional on Parent + note_model: str # Override Optional on Children data_set_has_changed: bool = field(init=False, default=False) @@ -164,8 +171,8 @@ def set_relevant_data(self, data_set: Dict[str, dict]): if changed > 0 or added > 0: self.data_set_has_changed = True - logging.info(f"Set {self.csv_file.file_location} data; changed {changed}, " - f"added {added}, while {unchanged} were identical") + logging.info(f"Set {self.csv_file.file_location} data; changed {changed}, " + f"added {added}, while {unchanged} were identical") def write_file_on_close(self): if self.data_set_has_changed: diff --git a/brain_brew/transformers/note_model_mapping.py 
b/brain_brew/transformers/note_model_mapping.py index 0e2e442..f27dce8 100644 --- a/brain_brew/transformers/note_model_mapping.py +++ b/brain_brew/transformers/note_model_mapping.py @@ -1,6 +1,6 @@ -from dataclasses import dataclass +from dataclasses import dataclass, field from enum import Enum -from typing import List, Union, Dict +from typing import List, Union, Dict, Optional from brain_brew.configuration.part_holder import PartHolder from brain_brew.configuration.representation_base import RepresentationBase @@ -44,36 +44,35 @@ def task_name(cls) -> str: def yamale_schema(cls) -> str: return f'''\ note_models: any(list(str()), str()) - columns_to_fields: map(str(), key=str()) - personal_fields: list(str()) + columns_to_fields: map(str(), key=str(), required=False) + personal_fields: list(str(), required=False) ''' @dataclass class Representation(RepresentationBase): note_models: Union[str, list] - columns_to_fields: Dict[str, str] - personal_fields: List[str] + columns_to_fields: Optional[Dict[str, str]] = field(default=None) + personal_fields: List[str] = field(default_factory=lambda: []) note_models: Dict[str, PartHolder[NoteModel]] - columns: List[FieldMapping] + columns_manually_mapped: List[FieldMapping] personal_fields: List[FieldMapping] - required_fields_definitions = [GUID, TAGS] - @classmethod def from_repr(cls, data: Union[Representation, dict]): rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) note_models = [PartHolder.from_file_manager(model) for model in single_item_to_list(rep.note_models)] return cls( - columns=[FieldMapping( + columns_manually_mapped=[FieldMapping( field_type=FieldMapping.FieldMappingType.COLUMN, - field_name=field, - value=key) for key, field in rep.columns_to_fields.items()], + field_name=f, + value=key) for key, f in rep.columns_to_fields.items()] + if rep.columns_to_fields else [], personal_fields=[FieldMapping( 
field_type=FieldMapping.FieldMappingType.PERSONAL_FIELD, - field_name=field, - value="") for field in rep.personal_fields], + field_name=f, + value="") for f in rep.personal_fields], note_models=dict(map(lambda nm: (nm.part_id, nm), note_models)) ) @@ -81,36 +80,40 @@ def get_note_model_mapping_dict(self): return {model: self for model in self.note_models} def verify_contents(self): + if not self.columns_manually_mapped: # No check needed if no manual mapping is performed + return + errors = [] + required_field_definitions = [GUID, TAGS] - extra_fields = [field.field_name for field in self.columns - if field.field_name not in self.required_fields_definitions] + extra_fields = [field.field_name for field in self.columns_manually_mapped + if field.field_name not in required_field_definitions] for holder in self.note_models.values(): model: NoteModel = holder.part # Check for Required Fields missing = [] - for req in self.required_fields_definitions: - if req not in [field.field_name for field in self.columns]: + for req in required_field_definitions: + if req not in [field.field_name for field in self.columns_manually_mapped]: missing.append(req) if missing: - errors.append(KeyError(f"""Note model(s) "{holder.part_id}" to Csv config error: \ - Definitions for fields {missing} are required.""")) + errors.append(KeyError(f"""Error in note_model_mappings part with note model "{holder.part_id}". \ + When mapping columns_to_fields you must map all fields. 
\ + Mapping is missing for for fields: {missing}""")) # Check Fields Align with Note Type missing = model.check_field_overlap( - [field.field_name for field in self.columns - if field.field_name not in self.required_fields_definitions] + [field.field_name for field in self.columns_manually_mapped + if field.field_name not in required_field_definitions] ) missing = [m for m in missing if m not in [field.field_name for field in self.personal_fields]] if missing: - errors.append( - KeyError(f"Note model '{holder.part_id}' to Csv config error. " - f"It expected {[field.name for field in model.fields]} but was missing: {missing}") - ) + errors.append(KeyError(f"""Error in note_model_mappings part with note model "{holder.part_id}". \ + When mapping columns_to_fields you must map all fields. \ + Mapping is missing for for fields: {missing}""")) # Find mappings which do not exist on any note models if extra_fields: @@ -118,17 +121,18 @@ def verify_contents(self): if extra_fields: errors.append( - KeyError(f"Field(s) '{extra_fields} are defined as mappings, but match no Note Model's field")) + KeyError(f"""Error in note_model_mappings part. 
\ + Field(s) '{extra_fields}' are defined as mappings, but match no Note Model fields""")) if errors: raise Exception(errors) def csv_row_map_to_note_fields(self, row: dict) -> dict: - relevant_row_data = self.get_relevant_data(row) + relevant_row_data = self.filter_data_row_by_relevant_columns(row) for pf in self.personal_fields: # Add in Personal Fields relevant_row_data.setdefault(pf.field_name, False) - for column in self.columns: # Rename from Csv Column to Note Type Field + for column in self.columns_manually_mapped: # Rename from Csv Column to Note Type Field if column.value in relevant_row_data: relevant_row_data[column.field_name] = relevant_row_data.pop(column.value) @@ -138,25 +142,24 @@ def csv_headers_map_to_note_fields(self, row: list) -> list: return list(self.csv_row_map_to_note_fields({row_name: "" for row_name in row}).keys()) def note_fields_map_to_csv_row(self, row): - for column in self.columns: # Rename from Note Type Field to Csv Column + for column in self.columns_manually_mapped: # Rename from Note Type Field to Csv Column if column.field_name in row: row[column.value] = row.pop(column.field_name) - for pf in self.personal_fields: # Remove Personal Fields if pf.field_name in row: del row[pf.field_name] - relevant_row_data = self.get_relevant_data(row) + relevant_row_data = self.filter_data_row_by_relevant_columns(row) return relevant_row_data - def get_relevant_data(self, row): - relevant_columns = [field.value for field in self.columns] - if not relevant_columns: - return [] - + def filter_data_row_by_relevant_columns(self, row): cols = list(row.keys()) + relevant_columns = [f.value for f in self.columns_manually_mapped] + if not relevant_columns: + return row + # errors = [KeyError(f"Missing column {rel_col}") for rel_col in relevant_columns if rel_col not in cols] # if errors: # raise Exception(errors) diff --git a/brain_brew/transformers/media_group_save_to_location.py b/brain_brew/transformers/save_media_group_to_location.py similarity 
index 100% rename from brain_brew/transformers/media_group_save_to_location.py rename to brain_brew/transformers/save_media_group_to_location.py diff --git a/brain_brew/transformers/save_note_model_to_location.py b/brain_brew/transformers/save_note_model_to_location.py new file mode 100644 index 0000000..a426f80 --- /dev/null +++ b/brain_brew/transformers/save_note_model_to_location.py @@ -0,0 +1,46 @@ +import logging +import os +from typing import List + +from brain_brew.representation.generic.html_file import HTMLFile +from brain_brew.representation.yaml.note_model import NoteModel, CSS_FILE, TEMPLATES +from brain_brew.representation.yaml.note_model_template import HTML_FILE as TEMPLATE_HTML_FILE, NAME as TEMPLATE_NAME, BROWSER_HTML_FILE as TEMPLATE_BROWSER_HTML_FILE +from brain_brew.representation.yaml.yaml_object import YamlObject +from brain_brew.utils import create_path_if_not_exists, clear_contents_of_folder + + +def save_note_model_to_location( + model: NoteModel, + folder: str, + clear_folder: bool +) -> str: + + nm_folder = os.path.join(folder, model.name + '/') + create_path_if_not_exists(nm_folder) + + if clear_folder: + clear_contents_of_folder(nm_folder) + + model_encoded = model.encode_as_part_with_empty_file_references() + + model_encoded[CSS_FILE.name] = os.path.join(nm_folder, "style.css") + HTMLFile.write_file(model_encoded[CSS_FILE.name], model.css) + + templates_dict = {t.name: t for t in model.templates} + + for template_data in model_encoded[TEMPLATES.name]: + name = template_data[TEMPLATE_NAME.name] + template = templates_dict[name] + t_data, b_t_data = template.get_template_files_data() + + template_data[TEMPLATE_HTML_FILE.name] = os.path.join(nm_folder, HTMLFile.to_filename_html(name)) + HTMLFile.write_file(template_data[TEMPLATE_HTML_FILE.name], t_data) + + if TEMPLATE_BROWSER_HTML_FILE.name in template_data and b_t_data is not None: + template_data[TEMPLATE_BROWSER_HTML_FILE.name] = os.path.join(nm_folder, HTMLFile.to_filename_html(name 
+ "_browser")) + HTMLFile.write_file(template_data[TEMPLATE_HTML_FILE.name], b_t_data) + + model_yaml_file_name = YamlObject.to_filename_yaml(os.path.join(nm_folder, model.name)) + YamlObject.dump_to_yaml_file(model_yaml_file_name, model_encoded) + + return model_yaml_file_name diff --git a/brain_brew/utils.py b/brain_brew/utils.py index 685c40d..d65e399 100644 --- a/brain_brew/utils.py +++ b/brain_brew/utils.py @@ -2,6 +2,7 @@ import os import random import re +import shutil import string from pathlib import Path from typing import List @@ -31,6 +32,14 @@ def filename_from_full_path(full_path): return re.findall(r'[^\\/:*?"<>|\r\n]+$', full_path)[0] +def folder_name_from_full_path(full_path): + return re.findall(r'[^\\/:*?"<>|\r\n]+[/]?$', full_path)[0] + + +def split_by_regex(str_to_split: str, pattern: str) -> List[str]: + return re.split(pattern, str_to_split) + + def find_media_in_field(field_value: str) -> List[str]: if not field_value: return [] @@ -58,6 +67,18 @@ def create_path_if_not_exists(path): os.makedirs(dir_name, exist_ok=True) +def clear_contents_of_folder(path): + for filename in os.listdir(path): + file_path = os.path.join(path, filename) + try: + if os.path.isfile(file_path) or os.path.islink(file_path): + os.unlink(file_path) + elif os.path.isdir(file_path): + shutil.rmtree(file_path) + except Exception as e: + print('Failed to delete %s. 
Reason: %s' % (file_path, e)) + + def split_tags(tags_value: str) -> list: split = [entry.strip() for entry in re.split(r';\s*|,\s*|\s+', tags_value)] while "" in split: @@ -66,8 +87,7 @@ def split_tags(tags_value: str) -> list: def join_tags(tags_list: list) -> str: - from brain_brew.configuration.global_config import GlobalConfig - return GlobalConfig.get_instance().join_values_with.join(tags_list) + return ", ".join(tags_list) # TODO: Make configurable def generate_anki_guid() -> str: @@ -91,11 +111,7 @@ def base91(num: int) -> str: return base91(random.randint(0, 2 ** 64 - 1)) -def sort_dict(data, sort_by_keys, reverse_sort, case_insensitive_sort=None): - from brain_brew.configuration.global_config import GlobalConfig - if case_insensitive_sort is None: - case_insensitive_sort = GlobalConfig.get_instance().sort_case_insensitive - +def sort_dict(data, sort_by_keys, reverse_sort, case_insensitive_sort): if sort_by_keys: if case_insensitive_sort: def sort_method(i): diff --git a/requirements.txt b/requirements.txt index abe389e..29bb7b7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,18 +1,38 @@ args==0.1.0 -attrs==20.1.0 +attrs==20.3.0 +bleach==3.3.0 +certifi==2020.12.5 +cffi==1.14.4 +chardet==4.0.0 clint==0.5.1 +colorama==0.4.4 coverage==4.5.4 -importlib-metadata==1.7.0 -more-itertools==8.5.0 -packaging==20.4 +cryptography==3.3.2 +docutils==0.16 +idna==2.10 +jeepney==0.6.0 +keyring==22.0.1 +more-itertools==8.7.0 +packaging==20.9 +pkginfo==1.7.0 pluggy==0.13.1 -py==1.9.0 +py==1.10.0 +pycparser==2.20 +Pygments==2.7.4 pyparsing==2.4.7 pytest==5.4.1 -PyYAML==5.3.1 +readme-renderer==28.0 +requests==2.25.1 +requests-toolbelt==0.9.1 +rfc3986==1.4.0 ruamel.yaml==0.16.10 ruamel.yaml.clib==0.2.2 +SecretStorage==3.3.1 six==1.15.0 +tqdm==4.56.1 +twine==3.3.0 +urllib3==1.26.3 wcwidth==0.2.5 +webencodings==0.5.1 yamale==3.0.4 -zipp==3.1.0 +zipp==3.4.0 diff --git a/scripts/yamale_build.py b/scripts/yamale_build.py index a429b03..c5eba2c 100644 --- 
a/scripts/yamale_build.py +++ b/scripts/yamale_build.py @@ -3,7 +3,7 @@ sys.path.append(os.path.abspath('')) -from brain_brew.configuration.build_config.top_level_builder import TopLevelBuilder +from brain_brew.commands.run_recipe.top_level_builder import TopLevelBuilder build: str = TopLevelBuilder.build_yamale() filepath = "brain_brew/schemas/recipe.yaml" diff --git a/setup.py b/setup.py index 9b33d30..731695a 100644 --- a/setup.py +++ b/setup.py @@ -1,11 +1,12 @@ import setuptools +from brain_brew.front_matter import latest_version_number with open("README.md", "r") as fh: long_description = fh.read() setuptools.setup( name="Brain-Brew", - version="0.3.2", + version=latest_version_number(), author="Jordan Munch O'Hare", author_email="brainbrew@jordan.munchohare.com", description="Automated Anki flashcard creation and extraction to/from Csv ", diff --git a/tests/representation/yaml/test_note_model_repr.py b/tests/representation/yaml/test_note_model_repr.py index a6a3cad..a5ae878 100644 --- a/tests/representation/yaml/test_note_model_repr.py +++ b/tests/representation/yaml/test_note_model_repr.py @@ -9,11 +9,12 @@ # CrowdAnki Files -------------------------------------------------------------------------- +from tests.test_helpers import debug_write_part_to_file @pytest.fixture def ca_nm_data_word(): - return JsonFile.read_file(TestFiles.CrowdAnkiNoteModels.LL_WORD_COMPLETE) + return JsonFile.read_file(TestFiles.CrowdAnkiNoteModels.LL_WORD) @pytest.fixture @@ -23,7 +24,7 @@ def ca_nm_word(ca_nm_data_word) -> NoteModel: @pytest.fixture def ca_nm_data_word_required_only(): - return JsonFile.read_file(TestFiles.CrowdAnkiNoteModels.LL_WORD_COMPLETE_ONLY_REQUIRED) + return JsonFile.read_file(TestFiles.CrowdAnkiNoteModels.LL_WORD_ONLY_REQUIRED) @pytest.fixture @@ -33,7 +34,7 @@ def ca_nm_word_required_only(ca_nm_data_word_required_only) -> NoteModel: @pytest.fixture def ca_nm_data_word_no_defaults(): - return 
JsonFile.read_file(TestFiles.CrowdAnkiNoteModels.LL_WORD_COMPLETE_NO_DEFAULTS) + return JsonFile.read_file(TestFiles.CrowdAnkiNoteModels.LL_WORD_NO_DEFAULTS) @pytest.fixture @@ -94,6 +95,7 @@ def test_normal(self, ca_nm_word, ca_nm_data_word): model = ca_nm_word encoded = model.encode_as_crowdanki() + # JsonFile.write_file(TestFiles.CrowdAnkiNoteModels.LL_WORD, encoded) assert encoded == ca_nm_data_word @@ -121,7 +123,6 @@ def test_only_required_uses_defaults(self, ca_nm_word_no_defaults, ca_nm_data_wo encoded = model.encode() - # debug_write_part_to_file(model, TestFiles.NoteModels.LL_WORD_NO_DEFAULTS) assert encoded != ca_nm_data_word_no_defaults assert encoded == nm_data_word_no_defaults diff --git a/tests/test_argument_reader.py b/tests/test_argument_reader.py index 43827b1..089e77e 100644 --- a/tests/test_argument_reader.py +++ b/tests/test_argument_reader.py @@ -1,14 +1,14 @@ -from argparse import ArgumentParser +from argparse import ArgumentParser, ArgumentError from unittest.mock import patch import pytest -from brain_brew.configuration.argument_reader import BBArgumentReader +from brain_brew.commands.argument_reader import BBArgumentReader, Commands @pytest.fixture() def arg_reader_test1(): - return BBArgumentReader() + return BBArgumentReader(test_mode=True) def test_constructor(arg_reader_test1): @@ -17,31 +17,35 @@ def test_constructor(arg_reader_test1): class TestArguments: - @pytest.mark.parametrize("arguments", [ - (["test_recipe.yaml", "--config"]), - (["test_recipe.yaml", "config_file.yaml", "--config"]), - (["--config", "config_file.yaml"]), - ([""]), - ([]) - ]) - def test_broken_arguments(self, arg_reader_test1, arguments): - def raise_exit(message): - raise SystemExit - - with pytest.raises(SystemExit): - with patch.object(BBArgumentReader, "error", side_effect=raise_exit): - parsed_args = arg_reader_test1.get_parsed(arguments) - - @pytest.mark.parametrize("arguments, recipe, config_file, verify_only", [ - (["test_recipe.yaml"], 
"test_recipe.yaml", None, False), - (["test_recipe.yaml", "--verify"], "test_recipe.yaml", None, True), - (["test_recipe.yaml", "-v"], "test_recipe.yaml", None, True), - (["test_recipe.yaml", "--config", "other_config.yaml"], "test_recipe.yaml", "other_config.yaml", False), - (["test_recipe.yaml", "--config", "other_config.yaml", "-v"], "test_recipe.yaml", "other_config.yaml", True), - ]) - def test_correct_arguments(self, arg_reader_test1, arguments, recipe, config_file, verify_only): - parsed_args = arg_reader_test1.parse_args(arguments) - - assert parsed_args.recipe == recipe - assert parsed_args.config_file == config_file - assert parsed_args.verify_only == verify_only + class CommandRun: + @pytest.mark.parametrize("arguments", [ + ([Commands.RUN_RECIPE.value]), + ([Commands.RUN_RECIPE.value, ""]), + ]) + def test_broken_arguments(self, arg_reader_test1, arguments): + def raise_exit(message): + raise SystemExit + + with pytest.raises(SystemExit): + with patch.object(BBArgumentReader, "error", side_effect=raise_exit): + arg_reader_test1.get_parsed(arguments) + + @pytest.mark.parametrize("arguments, recipe, verify_only", [ + ([Commands.RUN_RECIPE.value, "test_recipe.yaml"], "test_recipe.yaml", False), + ([Commands.RUN_RECIPE.value, "test_recipe.yaml", "--verify"], "test_recipe.yaml", True), + ([Commands.RUN_RECIPE.value, "test_recipe.yaml", "-v"], "test_recipe.yaml", True), + ]) + def test_correct_arguments(self, arg_reader_test1, arguments, recipe, verify_only): + parsed_args = arg_reader_test1.parse_args(arguments) + + assert parsed_args.recipe == recipe + assert parsed_args.verify_only == verify_only + + class CommandInit: + @pytest.mark.parametrize("arguments, location", [ + (["init", "crowdankifolder72"], "crowdankifolder72"), + ]) + def test_correct_arguments(self, arg_reader_test1, arguments, location): + parsed_args = arg_reader_test1.parse_args(arguments) + + assert parsed_args.crowdanki_folder == location diff --git a/tests/test_files.py 
b/tests/test_files.py index 2e811b5..e90b443 100644 --- a/tests/test_files.py +++ b/tests/test_files.py @@ -20,14 +20,12 @@ class CrowdAnkiNoteModels: LOC = "tests/test_files/deck_parts/note_models/" TEST = "Test Model" - TEST_COMPLETE = LOC + "Test-Model.json" - LL_WORD = "LL Word" - LL_WORD_COMPLETE = LOC + "LL-Word.json" + LL_WORD = LOC + "LL Word" - LL_WORD_COMPLETE_ONLY_REQUIRED = LOC + "LL-Word-Only-Required.json" + LL_WORD_ONLY_REQUIRED = LOC + "LL Word Only Required" - LL_WORD_COMPLETE_NO_DEFAULTS = LOC + "LL-Word-No-Defaults.json" + LL_WORD_NO_DEFAULTS = LOC + "LL Word No Defaults" class NoteModels: LOC = "tests/test_files/deck_parts/yaml/note_models/" diff --git a/tests/test_files/deck_parts/note_models/LL-Word-No-Defaults.json b/tests/test_files/deck_parts/note_models/LL Word No Defaults.json similarity index 89% rename from tests/test_files/deck_parts/note_models/LL-Word-No-Defaults.json rename to tests/test_files/deck_parts/note_models/LL Word No Defaults.json index 432be61..bc20584 100644 --- a/tests/test_files/deck_parts/note_models/LL-Word-No-Defaults.json +++ b/tests/test_files/deck_parts/note_models/LL Word No Defaults.json @@ -70,63 +70,7 @@ "latexPost": "\\end{document}TEST", "latexPre": "\\documentclass[12pt]{article}\n\\special{papersize=3in,5in}\n\\usepackage{amssymb,amsmath}\n\\pagestyle{empty}\n\\setlength{\\parindent}{0in}\n\\begin{document}\nTEST", "name": "LL Word", - "req": [ - [ - 0, - "all", - [ - 1 - ] - ], - [ - 1, - "all", - [ - 2 - ] - ], - [ - 2, - "all", - [ - 1, - 3 - ] - ], - [ - 3, - "all", - [ - 2, - 3 - ] - ], - [ - 4, - "all", - [ - 1, - 3 - ] - ], - [ - 5, - "all", - [ - 2, - 3 - ] - ], - [ - 6, - "all", - [ - 1, - 2, - 3 - ] - ] - ], + "req": [], "sortf": 1, "tags": ["TEST"], "tmpls": [ diff --git a/tests/test_files/deck_parts/note_models/LL-Word-Only-Required.json b/tests/test_files/deck_parts/note_models/LL Word Only Required.json similarity index 86% rename from 
tests/test_files/deck_parts/note_models/LL-Word-Only-Required.json rename to tests/test_files/deck_parts/note_models/LL Word Only Required.json index a9ed2b3..e9becab 100644 --- a/tests/test_files/deck_parts/note_models/LL-Word-Only-Required.json +++ b/tests/test_files/deck_parts/note_models/LL Word Only Required.json @@ -40,63 +40,6 @@ } ], "name": "LL Word", - "req": [ - [ - 0, - "all", - [ - 1 - ] - ], - [ - 1, - "all", - [ - 2 - ] - ], - [ - 2, - "all", - [ - 1, - 3 - ] - ], - [ - 3, - "all", - [ - 2, - 3 - ] - ], - [ - 4, - "all", - [ - 1, - 3 - ] - ], - [ - 5, - "all", - [ - 2, - 3 - ] - ], - [ - 6, - "all", - [ - 1, - 2, - 3 - ] - ] - ], "tmpls": [ { "afmt": "{{#X Word}}\n\t{{X Word}}\n{{/X Word}}\n\n
\n\n{{Picture}}\n\n{{#X Pronunciation (Recording and/or IPA)}}\n\t
{{X Pronunciation (Recording and/or IPA)}}\n{{/X Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", diff --git a/tests/test_files/deck_parts/note_models/LL-Word.json b/tests/test_files/deck_parts/note_models/LL Word.json similarity index 87% rename from tests/test_files/deck_parts/note_models/LL-Word.json rename to tests/test_files/deck_parts/note_models/LL Word.json index 6133160..cc48cfa 100644 --- a/tests/test_files/deck_parts/note_models/LL-Word.json +++ b/tests/test_files/deck_parts/note_models/LL Word.json @@ -69,129 +69,95 @@ ], "latexPost": "\\end{document}", "latexPre": "\\documentclass[12pt]{article}\n\\special{papersize=3in,5in}\n\\usepackage{amssymb,amsmath}\n\\pagestyle{empty}\n\\setlength{\\parindent}{0in}\n\\begin{document}\n", + "latexsvg": false, "name": "LL Word", - "req": [ - [ - 0, - "all", - [ - 1 - ] - ], - [ - 1, - "all", - [ - 2 - ] - ], - [ - 2, - "all", - [ - 1, - 3 - ] - ], - [ - 3, - "all", - [ - 2, - 3 - ] - ], - [ - 4, - "all", - [ - 1, - 3 - ] - ], - [ - 5, - "all", - [ - 2, - 3 - ] - ], - [ - 6, - "all", - [ - 1, - 2, - 3 - ] - ] - ], + "req": [], "sortf": 0, "tags": [], "tmpls": [ { "afmt": "{{#X Word}}\n\t{{X Word}}\n{{/X Word}}\n\n
\n\n{{Picture}}\n\n{{#X Pronunciation (Recording and/or IPA)}}\n\t
{{X Pronunciation (Recording and/or IPA)}}\n{{/X Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", "bafmt": "", + "bfont": "", "bqfmt": "", + "bsize": 0, "did": null, "name": "X Comprehension", "ord": 0, - "qfmt": "{{#X Word}}\n\t{{text:X Word}}\n{{/X Word}}" + "qfmt": "{{#X Word}}\n\t{{text:X Word}}\n{{/X Word}}", + "scratchPad": 0 }, { "afmt": "{{#Y Word}}\n\t{{Y Word}}\n{{/Y Word}}\n\n
\n\n{{Picture}}\n\n{{#Y Pronunciation (Recording and/or IPA)}}\n\t
{{Y Pronunciation (Recording and/or IPA)}}\n{{/Y Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", "bafmt": "", + "bfont": "", "bqfmt": "", + "bsize": 0, "did": null, "name": "Y Comprehension", "ord": 1, - "qfmt": "{{#Y Word}}\n\t{{text:Y Word}}\n{{/Y Word}}" + "qfmt": "{{#Y Word}}\n\t{{text:Y Word}}\n{{/Y Word}}", + "scratchPad": 0 }, { "afmt": "{{FrontSide}}\n\n
\n\n{{X Word}}\n\n{{#X Pronunciation (Recording and/or IPA)}}\n\t
{{X Pronunciation (Recording and/or IPA)}}\n{{/X Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", "bafmt": "", + "bfont": "", "bqfmt": "", + "bsize": 0, "did": null, "name": "X Production", "ord": 2, - "qfmt": "{{#X Word}}{{#Picture}}\n\t{{Picture}}\n{{/Picture}}{{/X Word}}" + "qfmt": "{{#X Word}}{{#Picture}}\n\t{{Picture}}\n{{/Picture}}{{/X Word}}", + "scratchPad": 0 }, { "afmt": "{{FrontSide}}\n\n
\n\n{{Y Word}}\n\n{{#Y Pronunciation (Recording and/or IPA)}}\n\t
{{Y Pronunciation (Recording and/or IPA)}}\n{{/Y Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", "bafmt": "", + "bfont": "", "bqfmt": "", + "bsize": 0, "did": null, "name": "Y Production", "ord": 3, - "qfmt": "{{#Y Word}}{{#Picture}}\n\t{{Picture}}\n{{/Picture}}{{/Y Word}}" + "qfmt": "{{#Y Word}}{{#Picture}}\n\t{{Picture}}\n{{/Picture}}{{/Y Word}}", + "scratchPad": 0 }, { "afmt": "{{FrontSide}}\n\n{{#X Pronunciation (Recording and/or IPA)}}\n\t
{{X Pronunciation (Recording and/or IPA)}}\n{{/X Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", "bafmt": "", + "bfont": "", "bqfmt": "", + "bsize": 0, "did": null, "name": "X Spelling", "ord": 4, - "qfmt": "{{#X Word}}\n\t
Spell this word:
\n\n\t
{{type:X Word}}
\n\n\t
{{Picture}}\n{{/X Word}}" + "qfmt": "{{#X Word}}\n\t
Spell this word:
\n\n\t
{{type:X Word}}
\n\n\t
{{Picture}}\n{{/X Word}}", + "scratchPad": 0 }, { "afmt": "{{FrontSide}}\n\n{{#Y Pronunciation (Recording and/or IPA)}}\n\t
{{Y Pronunciation (Recording and/or IPA)}}\n{{/Y Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", "bafmt": "", + "bfont": "", "bqfmt": "", + "bsize": 0, "did": null, "name": "Y Spelling", "ord": 5, - "qfmt": "{{#Y Word}}\n\t
Spell this word:
\n\n\t
{{type:Y Word}}
\n\n\t
{{Picture}}\n{{/Y Word}}" + "qfmt": "{{#Y Word}}\n\t
Spell this word:
\n\n\t
{{type:Y Word}}
\n\n\t
{{Picture}}\n{{/Y Word}}", + "scratchPad": 0 }, { "afmt": "{{FrontSide}}\n\n
\n\n
{{text:X Word}}
\n
{{text:Y Word}}
\n\n{{#X Pronunciation (Recording and/or IPA)}}\n\t
{{X Pronunciation (Recording and/or IPA)}}\n{{/X Pronunciation (Recording and/or IPA)}}\n\n{{#Y Pronunciation (Recording and/or IPA)}}\n\t
{{Y Pronunciation (Recording and/or IPA)}}\n{{/Y Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", "bafmt": "", + "bfont": "", "bqfmt": "", + "bsize": 0, "did": null, "name": "X and Y Production", "ord": 6, - "qfmt": "{{#X Word}}\n{{#Y Word}}\n\t{{Picture}}\n{{/Y Word}}\n{{/X Word}}" + "qfmt": "{{#X Word}}\n{{#Y Word}}\n\t{{Picture}}\n{{/Y Word}}\n{{/X Word}}", + "scratchPad": 0 } ], "type": 0, diff --git a/tests/test_files/deck_parts/yaml/note_models/LL-Word-No-Defaults.yaml b/tests/test_files/deck_parts/yaml/note_models/LL-Word-No-Defaults.yaml index 01853ac..f4dcb16 100644 --- a/tests/test_files/deck_parts/yaml/note_models/LL-Word-No-Defaults.yaml +++ b/tests/test_files/deck_parts/yaml/note_models/LL-Word-No-Defaults.yaml @@ -141,31 +141,4 @@ tags: version: - TEST __type__: NoteModelTEST -required_fields_per_template: -- - 0 - - all - - - 1 -- - 1 - - all - - - 2 -- - 2 - - all - - - 1 - - 3 -- - 3 - - all - - - 2 - - 3 -- - 4 - - all - - - 1 - - 3 -- - 5 - - all - - - 2 - - 3 -- - 6 - - all - - - 1 - - 2 - - 3 +required_fields_per_template: [] \ No newline at end of file diff --git a/tests/test_files/deck_parts/yaml/note_models/LL-Word-Only-Required.yaml b/tests/test_files/deck_parts/yaml/note_models/LL-Word-Only-Required.yaml index 5107b89..4128c83 100644 --- a/tests/test_files/deck_parts/yaml/note_models/LL-Word-Only-Required.yaml +++ b/tests/test_files/deck_parts/yaml/note_models/LL-Word-Only-Required.yaml @@ -76,31 +76,4 @@ templates: >{{Y Pronunciation (Recording and/or IPA)}}\n{{/Y Pronunciation (Recording\ \ and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n\ {{/Extra}}" -required_fields_per_template: -- - 0 - - all - - - 1 -- - 1 - - all - - - 2 -- - 2 - - all - - - 1 - - 3 -- - 3 - - all - - - 2 - - 3 -- - 4 - - all - - - 1 - - 3 -- - 5 - - all - - - 2 - - 3 -- - 6 - - all - - - 1 - - 2 - - 3 +required_fields_per_template: [] \ No newline at end of file diff --git a/tests/test_utils.py b/tests/test_utils.py index 86e2546..a64ce90 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,6 +1,7 @@ import pytest -from brain_brew.utils import find_media_in_field, str_to_lowercase_no_separators, split_tags +from brain_brew.representation.yaml.note_model_template import html_separator_regex +from brain_brew.utils import find_media_in_field, str_to_lowercase_no_separators, split_tags, split_by_regex class TestFindMedia: @@ -56,6 +57,22 @@ def test_runs(self, str_to_split, expected_result): assert split_tags(str_to_split) == expected_result +class TestSplitByRegex: + @pytest.mark.parametrize("str_to_split, split_by, expected_result", [ + ("testbabyhighfive", "baby", ["test", "highfive"]), + ("testbabyhighfive", "(baby)", ["test", "baby", "highfive"]), + ("testbabyhighfive", html_separator_regex, ["testbabyhighfive"]), + ("test\n---\nhighfive", html_separator_regex, ["test", "highfive"]), + ("test\n---\n\nhighfive", html_separator_regex, ["test", "highfive"]), + ("test\n-\nhighfive", html_separator_regex, ["test", "highfive"]), + ("test\n\n\n\n-\nhighfive", html_separator_regex, ["test", "highfive"]), + ("test\n\n\n\n---\n\n\n\nhighfive", html_separator_regex, ["test", "highfive"]), + ("test\n\n\n\n---\n\n\n\nhighfive\n\n--\n\nbackflip", html_separator_regex, ["test", "highfive", "backflip"]), + ]) + def test_runs(self, str_to_split, split_by, expected_result): + assert split_by_regex(str_to_split, split_by) == expected_result + + # class TestJoinTags: # @pytest.mark.parametrize("join_with, expected_result", [ # (", ", "test, test1, test2")