From bc4377a3b09ab075b2eca7f649b10c329d157deb Mon Sep 17 00:00:00 2001 From: Jeff Date: Thu, 29 Nov 2018 10:48:20 -0800 Subject: [PATCH] [src] - adding emr resource [src] - adding ocean resource [src] - adjusting docs [src] - add event subscription module [src] - add unit tests (pytest) --- .gitignore | 4 +- README.md | 502 +-------- ...eference.yml => argument_reference_eg.yml} | 200 ++-- docs/argument_reference_emr.yml | 78 ++ docs/argument_reference_ocean.yml | 74 ++ examples/elastigroup/README.md | 20 + .../elastigroup-additional-configurations.yml | 95 ++ .../elastigroup-advanced.yml | 0 .../{ => elastigroup}/elastigroup-basic.yml | 2 +- .../elastigroup/elastigroup-beanstalk.yml | 42 + .../elastigroup-code-deploy.yml | 24 +- .../elastigroup-docker-swarm.yml | 24 +- .../{ => elastigroup}/elastigroup-ecs.yml | 20 +- .../elastigroup-elasticbeanstalk.yml} | 26 +- .../elastigroup-kubernetes.yml | 19 +- .../elastigroup-load-balancers.yml | 5 - .../elastigroup-mlb-load-balancer.yml | 49 + .../{ => elastigroup}/elastigroup-nomad.yml | 23 +- .../elastigroup-persistance.yml} | 0 .../{ => elastigroup}/elastigroup-rancher.yml | 0 .../{ => elastigroup}/elastigroup-route53.yml | 24 +- .../elastigroup-scaling-policies.yml | 0 .../elastigroup-scheduling.yml | 0 examples/elastigroup/elastigroup-stateful.yml | 42 + .../elastigroup-variable-retrieval.yml | 0 examples/emr/README.md | 4 + examples/emr/spotinst-emr.yml | 62 ++ examples/events/README.md | 4 + .../events/spotinst-event-subscription.yml | 17 + examples/ocean/README.md | 4 + examples/ocean/spotinst-ocean.yml | 49 + spotinst/__init__.py | 0 .../spotinst_aws_elastigroup.py | 946 ++++++++++++++--- spotinst/spotinst_event_subscription.py | 287 +++++ spotinst/spotinst_mrscaler.py | 985 ++++++++++++++++++ spotinst/spotinst_ocean_cloud.py | 614 +++++++++++ test/__init__.py | 0 test/test_spotinst_aws_elastigroup.py | 71 ++ test/test_spotinst_event_subscription.py | 35 + test/test_spotinst_mrscaler.py | 53 + 
test/test_spotinst_ocean_cloud.py | 43 + 41 files changed, 3600 insertions(+), 847 deletions(-) rename docs/{argument_reference.yml => argument_reference_eg.yml} (84%) create mode 100644 docs/argument_reference_emr.yml create mode 100644 docs/argument_reference_ocean.yml create mode 100644 examples/elastigroup/README.md create mode 100644 examples/elastigroup/elastigroup-additional-configurations.yml rename examples/{ => elastigroup}/elastigroup-advanced.yml (100%) rename examples/{ => elastigroup}/elastigroup-basic.yml (97%) create mode 100644 examples/elastigroup/elastigroup-beanstalk.yml rename examples/{ => elastigroup}/elastigroup-code-deploy.yml (70%) rename examples/{ => elastigroup}/elastigroup-docker-swarm.yml (71%) rename examples/{ => elastigroup}/elastigroup-ecs.yml (75%) rename examples/{elastigroup-elastic-beanstalk.yml => elastigroup/elastigroup-elasticbeanstalk.yml} (67%) rename examples/{ => elastigroup}/elastigroup-kubernetes.yml (76%) rename examples/{ => elastigroup}/elastigroup-load-balancers.yml (88%) create mode 100644 examples/elastigroup/elastigroup-mlb-load-balancer.yml rename examples/{ => elastigroup}/elastigroup-nomad.yml (74%) rename examples/{elastigroup-stateful.yml => elastigroup/elastigroup-persistance.yml} (100%) rename examples/{ => elastigroup}/elastigroup-rancher.yml (100%) rename examples/{ => elastigroup}/elastigroup-route53.yml (73%) rename examples/{ => elastigroup}/elastigroup-scaling-policies.yml (100%) rename examples/{ => elastigroup}/elastigroup-scheduling.yml (100%) create mode 100644 examples/elastigroup/elastigroup-stateful.yml rename examples/{ => elastigroup}/elastigroup-variable-retrieval.yml (100%) create mode 100644 examples/emr/README.md create mode 100644 examples/emr/spotinst-emr.yml create mode 100644 examples/events/README.md create mode 100644 examples/events/spotinst-event-subscription.yml create mode 100644 examples/ocean/README.md create mode 100644 examples/ocean/spotinst-ocean.yml create mode 100644 
spotinst/__init__.py rename spotinst_aws_elastigroup.py => spotinst/spotinst_aws_elastigroup.py (59%) create mode 100644 spotinst/spotinst_event_subscription.py create mode 100644 spotinst/spotinst_mrscaler.py create mode 100644 spotinst/spotinst_ocean_cloud.py create mode 100644 test/__init__.py create mode 100644 test/test_spotinst_aws_elastigroup.py create mode 100644 test/test_spotinst_event_subscription.py create mode 100644 test/test_spotinst_mrscaler.py create mode 100644 test/test_spotinst_ocean_cloud.py diff --git a/.gitignore b/.gitignore index aa3559f..f72be12 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,8 @@ __pycache__/ # C extensions *.so +.DS_Store + # Distribution / packaging .Python env/ @@ -104,4 +106,4 @@ ENV/ *.xml *.iml -examples/*.retry \ No newline at end of file +examples/*/*.retry \ No newline at end of file diff --git a/README.md b/README.md index c09dd1e..54a5b7e 100644 --- a/README.md +++ b/README.md @@ -7,27 +7,33 @@ An Ansible Module for creating or deleting Spotinst Elastigroups * [Installation](#installation) * [Configuring Credentials](#configuring-credentials) * [Usage](#usage) - * [Elastigroup](#elastigroup) - * [Getting Started With Elastigroup](#getting-started-with-elastigroup) - * [Elastigroup Advanced](#elastigroup-advanced) - * [Elastigroup Additional Configurations](#elastigroup-additional-configurations) - * [Scaling](#scaling-policies) - * [Stateful](#stateful) - * [Scheduling](#scheduling) - * [Load Balancing](#load-balancing) - * [Third Party Integrations](#third-party-integrations) - * [ECS](#[ecs](examples/elastigroup-ecs.yml)) - * [Kubernetes](#[kubernetes](examples/elastigroup-kubernetes.yml)) - * [Nomad](#[nomad](examples/elastigroup-nomad.yml)) - * [Docker Swarm](#[docker-swarm](examples/elastigroup-docker-swarm.yml)) - * [CodeDeploy](#[code-deploy](examples/elastigroup-code-deploy.yml)) - * [Route53](#[route53](examples/elastigroup-route53.yml)) - * 
[ElasticBeanstalk](#[elastic-beanstalk](examples/elastigroup-elastic-beanstalk.yml)) - * [Rancher](#[rancher](examples/elastigroup-rancher.yml)) + * Examples: + * [Elastigroup](./examples/elastigroup) + * [Getting Started With Elastigroup](./examples/elastigroup/elastigroup-basic.yml) + * [Elastigroup Advanced](./examples/elastigroup/elastigroup-advanced.yml) + * [Elastigroup Additional Configurations](./examples/elastigroup/elastigroup-additional-configurations.yml) + * [Scaling](./examples/elastigroup/elastigroup-scaling-policies.yml) + * [Stateful](./examples/elastigroup/elastigroup-stateful.yml) + * [Scheduling](./examples/elastigroup/elastigroup-scheduling.yml) + * [Load Balancing](./examples/elastigroup/elastigroup-load-balancers.yml) + * Third Party Integrations + * [ECS](./examples/elastigroup/elastigroup-ecs.yml) + * [Kubernetes](./examples/elastigroup/elastigroup-kubernetes.yml) + * [Nomad](./examples/elastigroup/elastigroup-nomad.yml) + * [Docker Swarm](./examples/elastigroup/elastigroup-docker-swarm.yml) + * [CodeDeploy](./examples/elastigroup/elastigroup-code-deploy.yml) + * [Route53](./examples/elastigroup/elastigroup-route53.yml) + * [ElasticBeanstalk](./examples/elastigroup/elastigroup-elasticbeanstalk.yml) + * [EMR](./examples/emr) + * [Create EMR Cluster](./examples/emr/spotinst-emr.yml) + * [Ocean](./examples/ocean) + * [Create Ocean Cluster](./examples/ocean/spotinst-ocean.yml) + * [Event Subscription](./examples/events) + * [Create Event Subscription](./examples/events/spotinst-event-subscription.yml) ## Requirements -- [spotinst-sdk-python](https://github.com/spotinst/spotinst-sdk-python) >= `v1.0.29` +- [spotinst-sdk-python](https://github.com/spotinst/spotinst-sdk-python) >= `v1.0.44` ## Installation If you'd like to work with this version of the module and not the supplied version that is packaged with Ansible, @@ -44,19 +50,16 @@ The order in which the sdk searches for credentials is: 1. 
Fetching the account and token from environment variables under `SPOTINST_ACCOUNT` & `SPOTINST_TOKEN` If you choose to not pass your credentials directly you configure a credentials file, this file should be a valid `.yml` file. -The default shared credential file location is `~/.spotinst/credentials` and the default profile is `default` +The default shared credential file location is `~/.spotinst/credentials` - example ```yaml default: #profile token: $defaul_spotinst_token account: $default_spotinst-account-id -my_profle: - token: $my_spotinst_token - account: $my_spotinst-account-id ``` - 2. You can overwrite the credentials file location and the profile used as parameters `credentials_path` and/or `profile` inside the playbook + 2. You can overwrite the credentials file location and the profile used as parameters `credentials_path` inside the playbook - example ```yaml @@ -67,12 +70,10 @@ my_profle: name: ansible_test_group state: present credentials_path: /path/to/file - profile: my_profile ... ``` - 3. You can overwrite the credentials file location and the profile used as environment variables `SPOTINST_PROFILE` and/or `SPOTINST_SHARED_CREDENTIALS_FILE` - 4. Fetching from the default location with the default profile + 3. 
You can overwrite the credentials file location used as environment variables `SPOTINST_PROFILE` and/or `SPOTINST_SHARED_CREDENTIALS_FILE` ## Usage ```bash @@ -80,453 +81,10 @@ ansible-playbook elastigroup-basic.yml ``` ### Argument Reference -- [spotinst_aws_elastigroup](docs/argument_reference.yml) +- [spotinst_aws_elastigroup](./docs/argument_reference_eg.yml) +- [spotinst_mrScaler](./docs/argument_reference_emr.yml) +- [spotinst_ocean_cloud](./docs/argument_reference_ocean.yml) More information can be found in the official Ansible [documentation](https://docs.ansible.com/ansible/latest/modules/spotinst_aws_elastigroup_module.html#spotinst-aws-elastigroup-module) page as well as in the spotinst [documentation](https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-). -## Elastigroup - -### Getting Started With Elastigroup -In this basic example, we create a simple elastigroup -```yaml -- hosts: localhost - tasks: - - name: create elastigroup - spotinst_aws_elastigroup: - name: ansible_test_group - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-east-2c - subnet_id: subnet-123c - image_id: test-ami - key_pair: test-key-pair - max_size: 2 - min_size: 0 - target: 0 - unit: instance - monitoring: False - on_demand_instance_type: m4.large - product: Linux/UNIX - tags: - - Name: ansible_test_group - security_group_ids: - - sg-default - spot_instance_types: - - m4.xlarge - - m5.xlarge - do_not_update: - - image_id - - target - - user_data - register: result - - debug: var=result - -``` - -### Elastigroup Advanced -In this advanced example, we create an elastigroup with -- user data and shutdown script -- multiple EBS device mappings for the instances in this group -- network interfaces configuration for the instances in this group -- revert to spot configuration, which is the time frame at which Spotinst tries to spin spots instead of on-demands -- preferred availability zones in which to spin instances -- 
preferred spot instance types to launch - -```yaml -- hosts: localhost - tasks: - - name: create elastigroup - spotinst_aws_elastigroup: - name: ansible_test_group - state: present - risk: 100 - availability_vs_cost: balanced - revert_to_spot: - perform_at: timeWindow - time_windows: - - "Sun:11:00-Mon:12:00" - - "Mon:03:00-Wed:02:30" - availability_zones: - - name: us-east-2c - subnet_id: subnet-123c - - name: us-east-2b - subnet_id: subnet-123b - - name: us-east-2a - subnet_id: subnet-123a - image_id: test-ami - key_pair: test-key-pair - max_size: 2 - min_size: 0 - target: 0 - unit: instance - monitoring: False - on_demand_instance_type: m4.large - product: Linux/UNIX - user_data: IyEvdXNyL2Jpbi9lbnYgYmFzaA== - shutdown_script: IyEvdXNyL2Jpbi9lbnYgYmFzaA== - preferred_availability_zones: - - us-east-2c - block_device_mappings: - - device_name: '/dev/xvda' - ebs: - volume_size: 60 - volume_type: gp2 - - device_name: '/dev/xvdb' - ebs: - volume_size: 120 - volume_type: gp2 - - device_name: '/dev/xvdc' - virtual_name: ephemeral0 - network_interfaces: - - description: test-eni - device_index: 0 - associate_public_ip_address: true - delete_on_termination: true - associate_ipv6_address: false - - description: test-eni - device_index: 1 - associate_public_ip_address: false - delete_on_termination: true - associate_ipv6_address: false - tags: - - Name: ansible_test_group - - Environment: dev - security_group_ids: - - sg-default - spot_instance_types: - - m4.xlarge - - m5.xlarge - preferred_spot_instance_types: - - m4.xlarge - do_not_update: - - image_id - - target - - user_data - register: result - - debug: var=result -``` - -### Elastigroup Additional Configurations -#### Scaling Policies -- Scale your elastigroup using up/down and target tracking scaling policies with a variety of adjustment operations -```yaml -- hosts: localhost - tasks: - - name: create elastigroup - spotinst_aws_elastigroup: - name: ansible_test_group - state: present - risk: 100 - 
availability_vs_cost: balanced - availability_zones: - - name: us-east-2c - subnet_id: subnet-123c - - name: us-east-2b - subnet_id: subnet-123b - - name: us-east-2a - subnet_id: subnet-123a - image_id: test-ami - key_pair: test-key-pair - max_size: 2 - min_size: 0 - target: 0 - unit: instance - monitoring: False - on_demand_instance_type: m4.large - product: Linux/UNIX - user_data: IyEvdXNyL2Jpbi9lbnYgYmFzaA== - shutdown_script: IyEvdXNyL2Jpbi9lbnYgYmFzaA== - target_tracking_policies: - - policy_name: test-target-tracking-1 - namespace: AWS/EC2 - metric_name: CPUUtilization - statistic: average - unit: percent - target: 50 - cooldown: 120 - source: cloudWatch - up_scaling_policies: - - policy_name: test-scaling-policies-1 - namespace: AWS/EC2 - metric_name: CPUUtilization - statistic: average - unit: percent - cooldown: 120 - threshold: 50 - source: cloudWatch - dimensions: - - name: InstanceId - evaluation_periods: 5 - period: 300 - action_type: adjustment - adjustment: 1 - - policy_name: test-scaling-policies-2 - namespace: AWS/EC2 - metric_name: CPUUtilization - statistic: average - unit: percent - cooldown: 120 - threshold: 50 - source: cloudWatch - dimensions: - - name: InstanceType - evaluation_periods: 5 - period: 300 - action_type: updateCapacity - target: 10 - maximum: 15 - minimum: 5 - down_scaling_policies: - - policy_name: test-scaling-policies-1 - namespace: AWS/EC2 - metric_name: CPUUtilization - statistic: average - unit: percent - cooldown: 120 - threshold: 50 - source: cloudWatch - dimensions: - - name: InstanceId - evaluation_periods: 5 - period: 300 - action_type: percentageAdjustment - adjustment: 20 - tags: - - Name: ansible_test_group - - Environment: dev - security_group_ids: - - sg-default - spot_instance_types: - - m4.xlarge - - m5.xlarge - do_not_update: - - image_id - - target - - user_data - register: result - - debug: var=result -``` -#### Stateful -- Persist your mounted root & data volumes along with connected ip addresses -```yaml 
-- hosts: localhost - tasks: - - name: create elastigroup - spotinst_aws_elastigroup: - name: ansible_test_group - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-east-2c - subnet_id: subnet-123c - - name: us-east-2b - subnet_id: subnet-123b - - name: us-east-2a - subnet_id: subnet-123a - image_id: test-ami - key_pair: test-key-pair - max_size: 2 - min_size: 0 - target: 0 - unit: instance - monitoring: False - on_demand_instance_type: m4.large - product: Linux/UNIX - user_data: IyEvdXNyL2Jpbi9lbnYgYmFzaA== - shutdown_script: IyEvdXNyL2Jpbi9lbnYgYmFzaA== - persistence: - should_persist_root_device: false - should_persist_block_devices: true - should_persist_private_ip: false - block_devices_mode: reattach - private_ips: - - 1.2.3.4 - - 2.3.4.5 - tags: - - Name: ansible_test_group - - Environment: dev - security_group_ids: - - sg-default - spot_instance_types: - - m4.xlarge - - m5.xlarge - do_not_update: - - image_id - - target - - user_data - register: result - - debug: var=result -``` -#### Scheduling -- Perform scheduled actions on your elastigroup such as scale, instance count adjustments etc. 
-```yaml -- hosts: localhost - tasks: - - name: create elastigroup - spotinst_aws_elastigroup: - name: ansible_test_group - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-east-2c - subnet_id: subnet-123c - - name: us-east-2b - subnet_id: subnet-123b - - name: us-east-2a - subnet_id: subnet-123a - image_id: test-ami - key_pair: test-key-pair - max_size: 2 - min_size: 0 - target: 0 - unit: instance - monitoring: False - on_demand_instance_type: m4.large - product: Linux/UNIX - user_data: IyEvdXNyL2Jpbi9lbnYgYmFzaA== - shutdown_script: IyEvdXNyL2Jpbi9lbnYgYmFzaA== - scheduled_tasks: - - task_type: scale - start_time: "2019-05-25T10:55:09Z" - scale_target_capacity: 3 - scale_min_capacity: 3 - scale_max_capacity: 3 - - task_type: backup_ami - frequency: hourly - - task_type: roll - cron_expression: "00 17 * * 3" - batch_size_percentage: 30 - - task_type: scaleDown - cron_expression: "00 22 * * 3" - adjustment: 1 - tags: - - Name: ansible_test_group - - Environment: dev - security_group_ids: - - sg-default - spot_instance_types: - - m4.xlarge - - m5.xlarge - do_not_update: - - image_id - - target - - user_data - register: result - - debug: var=result -``` -#### Load Balancing -- Integrate and connect your instances AWS's ELB and ALB along with Spotinst's MLB -```yaml -- hosts: localhost - tasks: - - name: create elastigroup - spotinst_aws_elastigroup: - name: ansible_test_group - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-east-2c - subnet_id: subnet-123c - - name: us-east-2b - subnet_id: subnet-123b - - name: us-east-2a - subnet_id: subnet-123a - image_id: test-ami - key_pair: test-key-pair - max_size: 2 - min_size: 0 - target: 0 - unit: instance - monitoring: False - on_demand_instance_type: m4.large - product: Linux/UNIX - user_data: IyEvdXNyL2Jpbi9lbnYgYmFzaA== - shutdown_script: IyEvdXNyL2Jpbi9lbnYgYmFzaA== - load_balancers: - - test_classic_elb - target_group_arns: - 
- "arn:aws:elasticloadbalancing:us-west-2:922761411234:targetgroup/TestTargetGroup/123abc" - mlb_load_balancers: - - target_set_id: "ts-123456789" - balancer_id: "lb-123456789" - auto_weight: true - az_awareness: false - tags: - - Name: ansible_test_group - - Environment: dev - security_group_ids: - - sg-default - spot_instance_types: - - m4.xlarge - - m5.xlarge - do_not_update: - - image_id - - target - - user_data - register: result - - debug: var=result -``` -#### Variable usage & output -- In this example, we create an elastigroup and wait 600 seconds to retrieve the instances, and use their instance ids -```yaml -- hosts: localhost - tasks: - - name: create elastigroup - spotinst_aws_elastigroup: - profile: ci - name: ansible_test_group - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-east-2c - subnet_id: subnet-39e2d574 #stg-subnet1 (az1) - - name: us-east-2b - subnet_id: subnet-a9f008d3 #stg-subnet2 (az2) - - name: us-east-2a - subnet_id: subnet-50c24238 #stg-subnet3 (az3) - image_id: test-ami - key_pair: spotinst_ci - max_size: 2 - min_size: 0 - target: 2 - unit: instance - monitoring: False - on_demand_instance_type: m4.large - product: Linux/UNIX - tags: - - Name: ansible_test_group - security_group_ids: - - sg-8ad2bbe1 - spot_instance_types: - - m4.xlarge - - m5.xlarge - do_not_update: - - image_id - - target - - user_data - wait_for_instances: True - wait_timeout: 600 - register: result - - - name: Store instance ids to file - shell: echo {{ item.instance_id }}\\n >> list_of_instance_ids - with_items: "{{ result.instances }}" - - debug: var=result -``` - -### Third Party Integrations -- ##### [ecs](examples/elastigroup-ecs.yml) -- ##### [kubernetes](examples/elastigroup-kubernetes.yml) -- ##### [nomad](examples/elastigroup-nomad.yml) -- ##### [docker swarm](examples/elastigroup-docker-swarm.yml) -- ##### [code-deploy](examples/elastigroup-code-deploy.yml) -- ##### 
[route53](examples/elastigroup-route53.yml) -- ##### [elastic-beanstalk](examples/elastigroup-elastic-beanstalk.yml) -- ##### [rancher](examples/elastigroup-rancher.yml) diff --git a/docs/argument_reference.yml b/docs/argument_reference_eg.yml similarity index 84% rename from docs/argument_reference.yml rename to docs/argument_reference_eg.yml index f0ac457..be9a428 100644 --- a/docs/argument_reference.yml +++ b/docs/argument_reference_eg.yml @@ -1,104 +1,50 @@ options: - credentials_path: - description: - - credentials file path. - default: ~/.spotinst/credentials - type: str - profile: + credentials_path: description: - - credentials profile to use - default: default - type: str + - (String) Optional parameter that allows to set a non-default credentials path. + Default is ~/.spotinst/credentials account_id: description: - - account id to authenticate with - default: taken from credentials file - type: str + - (String) Optional parameter that allows to set an account-id inside the module configuration + By default this is retrieved from the credentials path availability_vs_cost: - description: - - The strategy orientation - required: true choices: - availabilityOriented - costOriented - balanced + description: + - (String) The strategy orientation. 
+ required: true availability_zones: description: - - availability zone configuration + - (List of Objects) a list of hash/dictionaries of Availability Zones that are configured in the elastigroup; + '[{"key":"value", "key":"value"}]'; + keys allowed are + name (String), + subnet_id (String), + placement_group_name (String), required: true - supoptions: - name: - description: availability zone name - type: str - subnet_id: - description: subnet id - type: str - placement_group_name: - description: placement group name - type: str block_device_mappings: description: - - EBS configurations for elastigroup - suboptions: - device_name: - description: ebs device name - type: str - virtual_name: - description: ebs device virtual name - type: str - no_device: - description: unmap a defined device - type: bool - default: "" - ebs: - description: ebs configuration - suboptions: - delete_on_termination: - description: delete the volume when instance is terminated - type: bool - encrypted: - description: ebs device encryption - type: bool - iops: - description: ebs device iops - type: int - snapshot_id: - description: snapshot id - type: str - volume_type: - description: volume type - default: standard - choices: - - standard - - io1 - - gp2 - - st1 - - sc1 - volume_size: - description: volume size - type: int - kms_key_id: - description: kms key id - type: str - - chef: - description: - - chef integration configuration - suboptions: - user: - description: user name - type: str - pem_key: - description: pem key - type: str - chef_version: - description: version - type: str + - (List of Objects) a list of hash/dictionaries of Block Device Mappings for elastigroup instances; + You can specify virtual devices and EBS volumes.; + '[{"key":"value", "key":"value"}]'; + keys allowed are + device_name (List of Strings), + virtual_name (String), + no_device (String), + ebs (Object, expects the following keys- + delete_on_termination(Boolean), + encrypted(Boolean), + iops (Integer), + 
snapshot_id(Integer), + volume_type(String), + volume_size(Integer)) code_deploy: description: @@ -121,6 +67,16 @@ options: description: terminate instance on failure type: bool + chef: + description: + - (Object) The Chef integration configuration.; + Expects the following keys - chef_server (String), + organization (String), + user (String), + pem_key (String), + chef_version (String) + + docker_swarm: description: - (Object) The Docker Swarm integration configuration.; @@ -148,6 +104,14 @@ options: - (Boolean) Enable EBS optimization for supported instances which are not enabled by default.; Note - additional charges will be applied. + ebs_volume_pool: + description: + - (List of Objects) a list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; + '[{"key":"value", "key":"value"}]'; + keys allowed are - + volume_ids (List of Strings), + device_name (String) + ecs: description: - (Object) The ECS integration configuration.; @@ -167,6 +131,10 @@ options: down (Object expecting the following key - down_evaluation_periods (Integer))) + elastic_ips: + description: + - (List of Strings) List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate to the group instances + elastic_beanstalk: description: - (Object) The ElasticBeanstalk integration configuration.; @@ -180,10 +148,6 @@ options: action (String), should_drain_instances (Boolean))) - elastic_ips: - description: - - (List of Strings) List of ElasticIps Allocation Ids to associate to the group instances - fallback_to_od: description: - (Boolean) In case of no spots available, Elastigroup will launch an On-demand instance instead @@ -348,7 +312,6 @@ options: down (Object expecting the following key - down_evaluation_periods (Integer))) - on_demand_count: description: - (Integer) Required if risk is not set @@ -364,8 +327,7 @@ options: description: - (Object) The elastigroup OpsWorks integration configration.; Expects the following key - - layer_id (String), - 
stark_type (String) + layer_id (String) persistence: description: @@ -373,20 +335,15 @@ options: Accepts the following keys - should_persist_root_device (Boolean), should_persist_block_devices (Boolean), - should_persist_private_ip (Boolean), - block_devices_mode (String) - - preferred_availability_zones: - description: - - (List) The preferred availability zones instance should spin instances in.; - + should_persist_private_ip (Boolean) + preferred_spot_instance_types: description: - (List) The preferred spot instance types.; private_ips: description: - - (List) List of Private IPs to associate to the group instances.; + - (List) List of Private IPs to associate to the group instances. product: choices: @@ -404,17 +361,14 @@ options: description: - (Object) The Rancher integration configuration.; Expects the following keys - + version (String), access_key (String), secret_key (String), - master_host (String), - version (String) + master_host (String) revert_to_spot: description: - - (Object) Hold settings for strategy correction - replacing On-Demand for Spot instances.; - Expects the following keys - - perform_at (String), - time_windows (List of Strings representing the time windows) + - (Object) Contains parameters for revert to spot right_scale: description: @@ -422,12 +376,20 @@ options: Expects the following keys - account_id (String), refresh_token (String) - region (String) risk: description: - (Integer) required if on demand is not set. The percentage of Spot instances to launch (0 - 100). + roll_config: + description: + - (Object) Roll configuration.; + If you would like the group to roll after updating, please use this feature. 
+ Accepts the following keys - + batch_size_percentage(Integer, Required), + grace_period - (Integer, Required), + health_check_type(String, Optional) + route53: description: - (Object) The Route53 integration configuration.; @@ -438,15 +400,6 @@ options: name (String) use_public_ip (Boolean))) - roll_config: - description: - - (Object) Roll configuration.; - If you would like the group to roll after updating, please use this feature. - Accepts the following keys - - batch_size_percentage(Integer, Required), - grace_period - (Integer, Required), - health_check_type(String, Optional) - scheduled_tasks: description: - (List of Objects) a list of hash/dictionaries of scheduled tasks to configure in the elastigroup; @@ -462,8 +415,7 @@ options: frequency (String), grace_period (Integer), task_type (String, required), - is_enabled (Boolean), - start_time (String) + is_enabled (Boolean) security_group_ids: description: @@ -471,7 +423,7 @@ options: In case of update it will override the existing Security Group with the new given array required: true - shut_down_script: + shutdown_script: description: - (String) The Base64-encoded shutdown script that executes prior to instance termination. Encode before setting. @@ -515,10 +467,6 @@ options: description: - (Boolean) Enable deletion of volumes on stateful group deletion - subnet_ids: - description: - - (List) A comma-separated list of subnet identifiers for your group - tags: description: - (List of tagKey:tagValue paris) a list of tags to configure in the elastigroup. 
Please specify list of keys and values (key colon value); @@ -560,8 +508,7 @@ options: namespace (String, required), metric_name (String, required), dimensions (List of Objects, Keys allowed are name (String, required) and value (String)), - statistic (String, required), - extended_statistic (String), + statistic (String, required) evaluation_periods (String, required), period (String, required), threshold (String, required), @@ -573,8 +520,7 @@ options: min_target_capacity (String), target (String), maximum (String), - minimum (String), - source (String) + minimum (String) down_scaling_policies: @@ -587,7 +533,6 @@ options: metric_name (String, required), dimensions ((List of Objects), Keys allowed are name (String, required) and value (String)), statistic (String, required), - extended_statistic (String), evaluation_periods (String, required), period (String, required), threshold (String, required), @@ -599,8 +544,7 @@ options: max_target_capacity (String), target (String), maximum (String), - minimum (String), - source (String) + minimum (String) target_tracking_policies: description: @@ -644,4 +588,4 @@ options: wait_timeout: description: - (Integer) How long the module should wait for instances before failing the action.; - Only works if wait_for_instances is True. \ No newline at end of file + Only works if wait_for_instances is True. diff --git a/docs/argument_reference_emr.yml b/docs/argument_reference_emr.yml new file mode 100644 index 0000000..24b308f --- /dev/null +++ b/docs/argument_reference_emr.yml @@ -0,0 +1,78 @@ +options: + credentials_path: + description: + - (String) Optional parameter that allows to set a non-default credentials path. 
+ Default is ~/.spotinst/credentials + required: false + + account_id: + description: + - (String) Optional parameter that allows to set an account-id inside the module configuration + Bydefault this is retrieved from the credentials path + required: false + + token: + description: + - (String) Optional parameter that allows to set an token inside the module configuration + By default this is retrieved from the credentials path + required: false + + state: + choices: + - present + - absent + description: + - (String) If set to present will either create or update, if absent will delete + required: false + + uniqueness_by: + choices: + - id + - name + description: + - (String) If set to id an id must be provided, if name no id is needed + required: false + + name: + description: + - (String) Name for EMR cluster + required: true + + description: + description: + - (String) Description of EMR cluster + required: false + + region: + description: + - (String) Region to deploy EMR cluster instance Groups + required: true + + strategy: + choices: + - new + - clone + - wrap + description: + - (Dict) Choose to create new cluster, clone an existing cluster or wrap an existing cluster + required: true + + scheduling: + description: + - (Dict) List of Scheduled tasks to perform + required: false + + scaling: + description: + - (Dict) Lists of up and down scaling policies + required: false + + compute: + description: + - (Dict) Schema that contains instance groups and other important resource parameters + required: true + + cluster: + description: + - (Dict) Schema that contains cluster parameters + required: false diff --git a/docs/argument_reference_ocean.yml b/docs/argument_reference_ocean.yml new file mode 100644 index 0000000..45ab189 --- /dev/null +++ b/docs/argument_reference_ocean.yml @@ -0,0 +1,74 @@ +options: + credentials_path: + description: + - (String) Optional parameter that allows to set a non-default credentials path. 
+ Default is ~/.spotinst/credentials + required: false + + account_id: + description: + - (String) Optional parameter that allows to set an account-id inside the module configuration + By default this is retrieved from the credentials path + required: false + + token: + description: + - (String) Optional parameter that allows to set a token inside the module configuration + By default this is retrieved from the credentials path + required: false + + state: + choices: + - present + - absent + description: + - (String) If set to present will either create or update, if absent will delete + required: false + + uniqueness_by: + choices: + - id + - name + description: + - (String) If set to id an id must be provided, if name no id is needed + required: false + + name: + description: + - (String) Name for Ocean cluster + required: true + + region: + description: + - (String) Region to deploy Ocean cluster + required: true + + controller_cluster_id: + description: + - (String) This ID must be unique for each Ocean cluster per account + required: true + + region: + description: + - (String) Region to deploy Ocean cluster instance Groups + required: true + + auto_scaler: + description: + - (Dict) Schema containing info on how auto scaler will function + required: true + + capacity: + description: + - (Dict) Schema containing target, min, and max + required: true + + strategy: + description: + - (Dict) Schema containing how to run the cluster + required: true + + compute: + description: + - (Dict) Schema containing info on the type of compute resources to use + required: true \ No newline at end of file diff --git a/examples/elastigroup/README.md b/examples/elastigroup/README.md new file mode 100644 index 0000000..22f335a --- /dev/null +++ b/examples/elastigroup/README.md @@ -0,0 +1,20 @@ +## Elastigroup + + * [Getting Started With Elastigroup](./elastigroup-basic.yml) + * [Elastigroup Advanced](./elastigroup-advanced.yml) + * [Elastigroup Additional 
Configurations](./elastigroup-additional-configurations.yml) + * [Scaling](./elastigroup-scaling-policies.yml) + * [Stateful](./elastigroup-stateful.yml) + * [Scheduling](./elastigroup-scheduling.yml) + * [Load Balancing](./elastigroup-load-balancers.yml) + * Third Party Integrations + * [ECS](./elastigroup-ecs.yml) + * [Kubernetes](./elastigroup-kubernetes.yml) + * [Nomad](./elastigroup-nomad.yml) + * [Docker Swarm](./elastigroup-docker-swarm.yml) + * [CodeDeploy](./elastigroup-code-deploy.yml) + * [Route53](./elastigroup-route53.yml) + * [ElasticBeanstalk](./elastigroup-elasticbeanstalk.yml) + + + diff --git a/examples/elastigroup/elastigroup-additional-configurations.yml b/examples/elastigroup/elastigroup-additional-configurations.yml new file mode 100644 index 0000000..5e48353 --- /dev/null +++ b/examples/elastigroup/elastigroup-additional-configurations.yml @@ -0,0 +1,95 @@ +- hosts: localhost + tasks: + - name: create elastigroup + spotinst_aws_elastigroup: + name: ansible_test_group + state: present + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-east-2c + subnet_id: subnet-123c + - name: us-east-2b + subnet_id: subnet-123b + - name: us-east-2a + subnet_id: subnet-123a + image_id: test-ami + key_pair: test-key-pair + max_size: 2 + min_size: 0 + target: 0 + unit: instance + monitoring: False + on_demand_instance_type: m4.large + product: Linux/UNIX + user_data: IyEvdXNyL2Jpbi9lbnYgYmFzaA== + shutdown_script: IyEvdXNyL2Jpbi9lbnYgYmFzaA== + target_tracking_policies: + - policy_name: test-target-tracking-1 + namespace: AWS/EC2 + metric_name: CPUUtilization + statistic: average + unit: percent + target: 50 + cooldown: 120 + source: cloudWatch + up_scaling_policies: + - policy_name: test-scaling-policies-1 + namespace: AWS/EC2 + metric_name: CPUUtilization + statistic: average + unit: percent + cooldown: 120 + threshold: 50 + source: cloudWatch + dimensions: + - name: InstanceId + evaluation_periods: 5 + period: 300 + action_type: 
adjustment + adjustment: 1 + - policy_name: test-scaling-policies-2 + namespace: AWS/EC2 + metric_name: CPUUtilization + statistic: average + unit: percent + cooldown: 120 + threshold: 50 + source: cloudWatch + dimensions: + - name: InstanceType + evaluation_periods: 5 + period: 300 + action_type: updateCapacity + target: 10 + maximum: 15 + minimum: 5 + down_scaling_policies: + - policy_name: test-scaling-policies-1 + namespace: AWS/EC2 + metric_name: CPUUtilization + statistic: average + unit: percent + cooldown: 120 + threshold: 50 + source: cloudWatch + dimensions: + - name: InstanceId + evaluation_periods: 5 + period: 300 + action_type: percentageAdjustment + adjustment: 20 + tags: + - Name: ansible_test_group + - Environment: dev + security_group_ids: + - sg-default + spot_instance_types: + - m4.xlarge + - m5.xlarge + do_not_update: + - image_id + - target + - user_data + register: result + - debug: var=result \ No newline at end of file diff --git a/examples/elastigroup-advanced.yml b/examples/elastigroup/elastigroup-advanced.yml similarity index 100% rename from examples/elastigroup-advanced.yml rename to examples/elastigroup/elastigroup-advanced.yml diff --git a/examples/elastigroup-basic.yml b/examples/elastigroup/elastigroup-basic.yml similarity index 97% rename from examples/elastigroup-basic.yml rename to examples/elastigroup/elastigroup-basic.yml index 134784e..cc28f9b 100644 --- a/examples/elastigroup-basic.yml +++ b/examples/elastigroup/elastigroup-basic.yml @@ -32,4 +32,4 @@ - target - user_data register: result - - debug: var=result + - debug: var=result \ No newline at end of file diff --git a/examples/elastigroup/elastigroup-beanstalk.yml b/examples/elastigroup/elastigroup-beanstalk.yml new file mode 100644 index 0000000..575e71e --- /dev/null +++ b/examples/elastigroup/elastigroup-beanstalk.yml @@ -0,0 +1,42 @@ +#In this basic example, we create a simple elastigroup wit beanstalk integration + +- hosts: localhost + tasks: + - name: create 
elastigroup + spotinst_aws_elastigroup: + account_id: + token: + name: ansible_test_beanstalk_group + state: present + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2b + subnet_id: subnet-1ba25052 + image_id: ami-f173cc91 + key_pair: Noam-key + max_size: 2 + min_size: 0 + target: 0 + unit: instance + monitoring: False + on_demand_instance_type: m4.large + product: Linux/UNIX + elastic_beanstalk: + environment_id: e-3tkmbj7hzc + managed_actions: + platform_update: + perform_at: timeWindow + time_window: Sun:01:00-Sun:02:00 + update_level: minorAndPatch + security_group_ids: + - sg-default + spot_instance_types: + - m4.xlarge + - m5.xlarge + do_not_update: + - image_id + - target + - user_data + register: result + - debug: var=result \ No newline at end of file diff --git a/examples/elastigroup-code-deploy.yml b/examples/elastigroup/elastigroup-code-deploy.yml similarity index 70% rename from examples/elastigroup-code-deploy.yml rename to examples/elastigroup/elastigroup-code-deploy.yml index 9689b15..64604c3 100644 --- a/examples/elastigroup-code-deploy.yml +++ b/examples/elastigroup/elastigroup-code-deploy.yml @@ -1,22 +1,20 @@ -#Integrate and Spotinst elastigroup with AWS's CodeDeploy +#Integrate and Spotinst elastigroup with AWS's CodeDeploy - hosts: localhost tasks: - name: create elastigroup spotinst_aws_elastigroup: - name: ansible_test_group - state: present + account_id: + token: + name: ansible_code_deploy_group + state: absent risk: 100 availability_vs_cost: balanced availability_zones: - - name: us-east-2c - subnet_id: subnet-123c - - name: us-east-2b - subnet_id: subnet-123b - - name: us-east-2a - subnet_id: subnet-123a - image_id: test-ami - key_pair: test-key-pair + - name: us-west-2b + subnet_id: subnet-1ba25052 + image_id: ami-f173cc91 + key_pair: Noam-key max_size: 2 min_size: 0 target: 0 @@ -24,8 +22,6 @@ monitoring: False on_demand_instance_type: m4.large product: Linux/UNIX - user_data: 
IyEvdXNyL2Jpbi9lbnYgYmFzaA== - shutdown_script: IyEvdXNyL2Jpbi9lbnYgYmFzaA== code_deploy: deployment_groups: - application_name: test-app-1 @@ -47,4 +43,4 @@ - target - user_data register: result - - debug: var=result + - debug: var=result \ No newline at end of file diff --git a/examples/elastigroup-docker-swarm.yml b/examples/elastigroup/elastigroup-docker-swarm.yml similarity index 71% rename from examples/elastigroup-docker-swarm.yml rename to examples/elastigroup/elastigroup-docker-swarm.yml index 468f712..426c71f 100644 --- a/examples/elastigroup-docker-swarm.yml +++ b/examples/elastigroup/elastigroup-docker-swarm.yml @@ -1,22 +1,20 @@ -#Integrate and Spotinst elastigroup with Docker Swarm +#Integrate and Spotinst elastigroup with Docker Swarm - hosts: localhost tasks: - name: create elastigroup spotinst_aws_elastigroup: - name: ansible_test_group - state: present + account_id: + token: + name: ansible_docker_swarm_test_group + state: absent risk: 100 availability_vs_cost: balanced availability_zones: - - name: us-east-2c - subnet_id: subnet-123c - - name: us-east-2b - subnet_id: subnet-123b - - name: us-east-2a - subnet_id: subnet-123a - image_id: test-ami - key_pair: test-key-pair + - name: us-west-2b + subnet_id: subnet-1ba25052 + image_id: ami-f173cc91 + key_pair: Noam-key max_size: 2 min_size: 0 target: 0 @@ -24,8 +22,6 @@ monitoring: False on_demand_instance_type: m4.large product: Linux/UNIX - user_data: IyEvdXNyL2Jpbi9lbnYgYmFzaA== - shutdown_script: IyEvdXNyL2Jpbi9lbnYgYmFzaA== docker_swarm: master_host: test-domain.com master_port: 80 @@ -51,4 +47,4 @@ - target - user_data register: result - - debug: var=result + - debug: var=result \ No newline at end of file diff --git a/examples/elastigroup-ecs.yml b/examples/elastigroup/elastigroup-ecs.yml similarity index 75% rename from examples/elastigroup-ecs.yml rename to examples/elastigroup/elastigroup-ecs.yml index 2616e09..f384d34 100644 --- a/examples/elastigroup-ecs.yml +++ 
b/examples/elastigroup/elastigroup-ecs.yml @@ -4,19 +4,17 @@ tasks: - name: create elastigroup spotinst_aws_elastigroup: - name: ansible_test_group - state: present + account_id: + token: + name: ansible_ecs_group + state: absent risk: 100 availability_vs_cost: balanced availability_zones: - - name: us-east-2c - subnet_id: subnet-123c - - name: us-east-2b - subnet_id: subnet-123b - - name: us-east-2a - subnet_id: subnet-123a - image_id: test-ami - key_pair: test-key-pair + - name: us-west-2b + subnet_id: subnet-1ba25052 + image_id: ami-f173cc91 + key_pair: Noam-key max_size: 2 min_size: 0 target: 0 @@ -24,8 +22,6 @@ monitoring: False on_demand_instance_type: m4.large product: Linux/UNIX - user_data: IyEvdXNyL2Jpbi9lbnYgYmFzaA== - shutdown_script: IyEvdXNyL2Jpbi9lbnYgYmFzaA== ecs: cluster_name: test-cluster-name auto_scale: diff --git a/examples/elastigroup-elastic-beanstalk.yml b/examples/elastigroup/elastigroup-elasticbeanstalk.yml similarity index 67% rename from examples/elastigroup-elastic-beanstalk.yml rename to examples/elastigroup/elastigroup-elasticbeanstalk.yml index 9bdec08..82988c4 100644 --- a/examples/elastigroup-elastic-beanstalk.yml +++ b/examples/elastigroup/elastigroup-elasticbeanstalk.yml @@ -1,23 +1,19 @@ - #Integrate and Spotinst elastigroup with AWS's ElasticBeanstalk - - hosts: localhost tasks: - name: create elastigroup spotinst_aws_elastigroup: - name: ansible_test_group - state: present + account_id: + token: + name: ansible_elasticbeanstalk_group + state: absent risk: 100 availability_vs_cost: balanced availability_zones: - - name: us-east-2c - subnet_id: subnet-123c - - name: us-east-2b - subnet_id: subnet-123b - - name: us-east-2a - subnet_id: subnet-123a - image_id: test-ami - key_pair: test-key-pair + - name: us-west-2b + subnet_id: subnet-1ba25052 + image_id: ami-f173cc91 + key_pair: Noam-key max_size: 2 min_size: 0 target: 0 @@ -25,10 +21,8 @@ monitoring: False on_demand_instance_type: m4.large product: Linux/UNIX - user_data: 
IyEvdXNyL2Jpbi9lbnYgYmFzaA== - shutdown_script: IyEvdXNyL2Jpbi9lbnYgYmFzaA== elastic_beanstalk: - environment_id: test-environment + environment_id: e-3tkmbj7hzc deployment_preferences: automatic_roll: true batch_size_percentage: 50 @@ -49,4 +43,4 @@ - target - user_data register: result - - debug: var=result + - debug: var=result \ No newline at end of file diff --git a/examples/elastigroup-kubernetes.yml b/examples/elastigroup/elastigroup-kubernetes.yml similarity index 76% rename from examples/elastigroup-kubernetes.yml rename to examples/elastigroup/elastigroup-kubernetes.yml index e005baa..cd0034b 100644 --- a/examples/elastigroup-kubernetes.yml +++ b/examples/elastigroup/elastigroup-kubernetes.yml @@ -4,19 +4,17 @@ tasks: - name: create elastigroup spotinst_aws_elastigroup: - name: ansible_test_group + account_id: + token: + name: test state: present risk: 100 availability_vs_cost: balanced availability_zones: - - name: us-east-2c - subnet_id: subnet-123c - - name: us-east-2b - subnet_id: subnet-123b - - name: us-east-2a - subnet_id: subnet-123a - image_id: test-ami - key_pair: test-key-pair + - name: us-west-2b + subnet_id: subnet-1ba25052 + image_id: ami-f173cc91 + key_pair: Noam-key max_size: 2 min_size: 0 target: 0 @@ -24,12 +22,9 @@ monitoring: False on_demand_instance_type: m4.large product: Linux/UNIX - user_data: IyEvdXNyL2Jpbi9lbnYgYmFzaA== - shutdown_script: IyEvdXNyL2Jpbi9lbnYgYmFzaA== kubernetes: cluster_identifier: test-cluster-id api-server: 127.0.0.1 - token: test-secret integration_mode: pod auto_scale: is_enabled: true diff --git a/examples/elastigroup-load-balancers.yml b/examples/elastigroup/elastigroup-load-balancers.yml similarity index 88% rename from examples/elastigroup-load-balancers.yml rename to examples/elastigroup/elastigroup-load-balancers.yml index 030cff5..f699190 100644 --- a/examples/elastigroup-load-balancers.yml +++ b/examples/elastigroup/elastigroup-load-balancers.yml @@ -30,11 +30,6 @@ - test_classic_elb 
target_group_arns: - "arn:aws:elasticloadbalancing:us-west-2:922761411234:targetgroup/TestTargetGroup/123abc" - mlb_load_balancers: - - target_set_id: "ts-123456789" - balancer_id: "lb-123456789" - auto_weight: true - az_awareness: false tags: - Name: ansible_test_group - Environment: dev diff --git a/examples/elastigroup/elastigroup-mlb-load-balancer.yml b/examples/elastigroup/elastigroup-mlb-load-balancer.yml new file mode 100644 index 0000000..ff04281 --- /dev/null +++ b/examples/elastigroup/elastigroup-mlb-load-balancer.yml @@ -0,0 +1,49 @@ +#Integrate and connect your instancesAWS's ELB and ALB along with Spotinst's MLB + +- hosts: localhost + tasks: + - name: create elastigroup + spotinst_aws_elastigroup: + account_id: + token: + name: ansible_mlb_lb_test_group + state: absent + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2b + subnet_id: subnet-1ba25052 + image_id: ami-f173cc91 + key_pair: Noam-key + max_size: 2 + min_size: 0 + target: 0 + unit: instance + monitoring: False + on_demand_instance_type: m4.large + product: Linux/UNIX + mlb_runtime: + deployment_id: dp-rm0f5b912345 + load_balancers: + - test_classic_elb + target_group_arns: + - "arn:aws:elb:us-west-2:123:targetgroup/TestTargetGroup/123abc" + mlb_load_balancers: + - target_set_id: "ts-123456789" + balancer_id: "lb-123456789" + auto_weight: true + az_awareness: false + tags: + - Name: ansible_test_group + - Environment: dev + security_group_ids: + - sg-default + spot_instance_types: + - m4.xlarge + - m5.xlarge + do_not_update: + - image_id + - target + - user_data + register: result + - debug: var=result \ No newline at end of file diff --git a/examples/elastigroup-nomad.yml b/examples/elastigroup/elastigroup-nomad.yml similarity index 74% rename from examples/elastigroup-nomad.yml rename to examples/elastigroup/elastigroup-nomad.yml index 219e79a..41dd7ad 100644 --- a/examples/elastigroup-nomad.yml +++ b/examples/elastigroup/elastigroup-nomad.yml @@ -1,22 
+1,19 @@ #Integrate and Spotinst elastigroup with Hashicorp's Nomad - - hosts: localhost tasks: - name: create elastigroup spotinst_aws_elastigroup: - name: ansible_test_group - state: present + account_id: + token: + name: ansible_nomad_test_group + state: absent risk: 100 availability_vs_cost: balanced availability_zones: - - name: us-east-2c - subnet_id: subnet-123c - - name: us-east-2b - subnet_id: subnet-123b - - name: us-east-2a - subnet_id: subnet-123a - image_id: test-ami - key_pair: test-key-pair + - name: us-west-2b + subnet_id: subnet-1ba25052 + image_id: ami-f173cc91 + key_pair: Noam-key max_size: 2 min_size: 0 target: 0 @@ -24,8 +21,6 @@ monitoring: False on_demand_instance_type: m4.large product: Linux/UNIX - user_data: IyEvdXNyL2Jpbi9lbnYgYmFzaA== - shutdown_script: IyEvdXNyL2Jpbi9lbnYgYmFzaA== nomad: master_host: test-domain.com master_port: 80 @@ -57,4 +52,4 @@ - target - user_data register: result - - debug: var=result + - debug: var=result \ No newline at end of file diff --git a/examples/elastigroup-stateful.yml b/examples/elastigroup/elastigroup-persistance.yml similarity index 100% rename from examples/elastigroup-stateful.yml rename to examples/elastigroup/elastigroup-persistance.yml diff --git a/examples/elastigroup-rancher.yml b/examples/elastigroup/elastigroup-rancher.yml similarity index 100% rename from examples/elastigroup-rancher.yml rename to examples/elastigroup/elastigroup-rancher.yml diff --git a/examples/elastigroup-route53.yml b/examples/elastigroup/elastigroup-route53.yml similarity index 73% rename from examples/elastigroup-route53.yml rename to examples/elastigroup/elastigroup-route53.yml index 2bb2f4a..0ff7583 100644 --- a/examples/elastigroup-route53.yml +++ b/examples/elastigroup/elastigroup-route53.yml @@ -1,22 +1,20 @@ -#Integrate and Spotinst elastigroup with AWS's Route53 +#Integrate and Spotinst elastigroup with AWS's Route53 - hosts: localhost tasks: - name: create elastigroup spotinst_aws_elastigroup: - name: 
ansible_test_group - state: present + account_id: + token: + name: ansible_route53_test_group + state: absent risk: 100 availability_vs_cost: balanced availability_zones: - - name: us-east-2c - subnet_id: subnet-123c - - name: us-east-2b - subnet_id: subnet-123b - - name: us-east-2a - subnet_id: subnet-123a - image_id: test-ami - key_pair: test-key-pair + - name: us-west-2b + subnet_id: subnet-1ba25052 + image_id: ami-f173cc91 + key_pair: Noam-key max_size: 2 min_size: 0 target: 0 @@ -24,8 +22,6 @@ monitoring: False on_demand_instance_type: m4.large product: Linux/UNIX - user_data: IyEvdXNyL2Jpbi9lbnYgYmFzaA== - shutdown_script: IyEvdXNyL2Jpbi9lbnYgYmFzaA== route53: domains: - hosted_zone_id: abc234 @@ -53,4 +49,4 @@ - target - user_data register: result - - debug: var=result + - debug: var=result \ No newline at end of file diff --git a/examples/elastigroup-scaling-policies.yml b/examples/elastigroup/elastigroup-scaling-policies.yml similarity index 100% rename from examples/elastigroup-scaling-policies.yml rename to examples/elastigroup/elastigroup-scaling-policies.yml diff --git a/examples/elastigroup-scheduling.yml b/examples/elastigroup/elastigroup-scheduling.yml similarity index 100% rename from examples/elastigroup-scheduling.yml rename to examples/elastigroup/elastigroup-scheduling.yml diff --git a/examples/elastigroup/elastigroup-stateful.yml b/examples/elastigroup/elastigroup-stateful.yml new file mode 100644 index 0000000..f647f20 --- /dev/null +++ b/examples/elastigroup/elastigroup-stateful.yml @@ -0,0 +1,42 @@ +#Integrate and Spotinst elastigroup with Stateful delete options + +- hosts: localhost + tasks: + - name: create elastigroup + spotinst_aws_elastigroup: + account_id: + token: + name: ansible_stateful_group + state: absent + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2b + subnet_id: subnet-1ba25052 + image_id: ami-f173cc91 + key_pair: Noam-key + max_size: 2 + min_size: 0 + target: 0 + unit: instance + 
monitoring: False + on_demand_instance_type: m4.large + product: Linux/UNIX + tags: + - Name: ansible_test_group + - Environment: dev + security_group_ids: + - sg-default + spot_instance_types: + - m4.xlarge + - m5.xlarge + do_not_update: + - image_id + - target + - user_data + stateful_deallocation_should_delete_images: false + stateful_deallocation_should_delete_network_interfaces: false + stateful_deallocation_should_delete_snapshots: false + stateful_deallocation_should_delete_volumes: false + register: result + - debug: var=result \ No newline at end of file diff --git a/examples/elastigroup-variable-retrieval.yml b/examples/elastigroup/elastigroup-variable-retrieval.yml similarity index 100% rename from examples/elastigroup-variable-retrieval.yml rename to examples/elastigroup/elastigroup-variable-retrieval.yml diff --git a/examples/emr/README.md b/examples/emr/README.md new file mode 100644 index 0000000..38e17f2 --- /dev/null +++ b/examples/emr/README.md @@ -0,0 +1,4 @@ +## EMR + + * [Create EMR Cluster](./spotinst-emr.yml) + diff --git a/examples/emr/spotinst-emr.yml b/examples/emr/spotinst-emr.yml new file mode 100644 index 0000000..0a647f6 --- /dev/null +++ b/examples/emr/spotinst-emr.yml @@ -0,0 +1,62 @@ +#Integrate and Spotinst elastigroup with Kubernetes + +- hosts: localhost + tasks: + - name: create emr + spotinst_aws_mrscaler: + account_id: + token: + state: present + name: ansible_test_group + description: this is from ansible + region: us-west-2 + strategy: + new: + release_label: emr-5.17.0 + provisioning_timeout: + timeout: 15 + timeout_action: terminate + compute: + availability_zones: + - name: us-west-2b + subnet_id: subnet-1ba25052 + instance_groups: + master_group: + instance_types: + - m3.xlarge + target: 1 + life_cycle: ON_DEMAND + core_group: + instance_types: + - m3.xlarge + target: 1 + life_cycle: SPOT + task_group: + instance_types: + - m3.xlarge + capacity: + minimum: 0 + maximum: 0 + target: 0 + life_cycle: SPOT + 
emr_managed_master_security_group: sg-8cfb40f6 + emr_managed_slave_security_group: sg-f2f94288 + additional_master_security_groups: + - sg-f2f94288 + additional_slave_security_groups: + - sg-8cfb40f6 + ec2_key_name: Noam-key + applications: + - name: Ganglia + version: "1.0" + - name: Hadoop + cluster: + visible_to_all_users: true + termination_protected: false + keep_job_flow_alive_when_no_steps: true + log_uri: s3://sorex-job-status + additional_info: "{'test':'more information'}" + job_flow_role: EMR_EC2_DefaultRole + security_configuration: test-config-jeffrey + register: result + - debug: var=result diff --git a/examples/events/README.md b/examples/events/README.md new file mode 100644 index 0000000..c65b206 --- /dev/null +++ b/examples/events/README.md @@ -0,0 +1,4 @@ +## Events + + * [Create Event Subscription](./spotinst-event-subscription.yml) + diff --git a/examples/events/spotinst-event-subscription.yml b/examples/events/spotinst-event-subscription.yml new file mode 100644 index 0000000..af7d8e2 --- /dev/null +++ b/examples/events/spotinst-event-subscription.yml @@ -0,0 +1,17 @@ +#In this basic example, we create an event subscription + +- hosts: localhost + tasks: + - name: create ocean + spotinst_event_subscription: + account_id: + token: + state: present + id: sis-e62dfd0f + resource_id: sig-992a78db + protocol: web + endpoint: https://webhook.com + event_type: GROUP_UPDATED + event_format: { "subject" : "%s", "message" : "%s" } + register: result + - debug: var=result \ No newline at end of file diff --git a/examples/ocean/README.md b/examples/ocean/README.md new file mode 100644 index 0000000..c1af38a --- /dev/null +++ b/examples/ocean/README.md @@ -0,0 +1,4 @@ +## Ocean + + * [Create Ocean Cluster](./spotinst-ocean.yml) + diff --git a/examples/ocean/spotinst-ocean.yml b/examples/ocean/spotinst-ocean.yml new file mode 100644 index 0000000..433ea94 --- /dev/null +++ b/examples/ocean/spotinst-ocean.yml @@ -0,0 +1,49 @@ +#In this basic example, we 
create an ocean cluster + +- hosts: localhost + tasks: + - name: create ocean + spotinst_ocean_cloud: + account_id: + token: + state: present + name: ansible_test_ocean + region: us-west-2 + controller_cluster_id: ocean.k8s + auto_scaler: + is_enabled: True + cooldown: 180 + resource_limits: + max_memory_gib: 1500 + max_vCpu: 750 + down: + evaluation_periods: 3 + headroom: + cpu_per_unit: 2000 + memory_per_unit: 0 + num_of_units: 4 + is_auto_config: True + capacity: + minimum: 0 + maximum: 0 + target: 0 + strategy: + utilize_reserved_instances: False + fallback_to_od: True + spot_percentage: 100 + compute: + instance_types: + whitelist: + - c4.8xlarge + subnet_ids: + - subnet-1ba25052 + launch_specification: + security_group_ids: + - sg-8cfb40f6 + image_id: ami-1178f169 + key_pair: Noam-key + tags: + - tag_key: tags + tag_value: test + register: result + - debug: var=result \ No newline at end of file diff --git a/spotinst/__init__.py b/spotinst/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/spotinst_aws_elastigroup.py b/spotinst/spotinst_aws_elastigroup.py similarity index 59% rename from spotinst_aws_elastigroup.py rename to spotinst/spotinst_aws_elastigroup.py index f72c73f..09f16ca 100644 --- a/spotinst_aws_elastigroup.py +++ b/spotinst/spotinst_aws_elastigroup.py @@ -19,34 +19,58 @@ You will have to have a credentials file in this location - /.spotinst/credentials The credentials file must contain a row that looks like this token = - Full documentation available at https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible- + Full documentation available at U(https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-) requirements: - python >= 2.7 - spotinst_sdk >= 1.0.38 options: credentials_path: + type: str + default: "/root/.spotinst/credentials" description: - - (String) Optional parameter that allows to set a non-default credentials path. 
- Default is ~/.spotinst/credentials + - Optional parameter that allows to set a non-default credentials path. account_id: + type: str description: - - (String) Optional parameter that allows to set an account-id inside the module configuration - By default this is retrieved from the credentials path + - Optional parameter that allows to set an account-id inside the module configuration. By default this is retrieved from the credentials path + + token: + version_added: 2.8 + type: str + description: + - Optional parameter that allows to set a token inside the module configuration. By default this is retrieved from the credentials path + + state: + type: str + choices: + - present + - absent + default: present + description: + - create, update or delete + + auto_apply_tags: + type: bool + description: + - Whether or not to apply tags without rolling group + version_added: 2.8 availability_vs_cost: + type: str choices: - availabilityOriented - costOriented - balanced description: - - (String) The strategy orientation. + - The strategy orientation. 
required: true availability_zones: + type: list description: - - (List of Objects) a list of hash/dictionaries of Availability Zones that are configured in the elastigroup; + - a list of hash/dictionaries of Availability Zones that are configured in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are name (String), @@ -55,8 +79,9 @@ required: true block_device_mappings: + type: list description: - - (List of Objects) a list of hash/dictionaries of Block Device Mappings for elastigroup instances; + - a list of hash/dictionaries of Block Device Mappings for elastigroup instances; You can specify virtual devices and EBS volumes.; '[{"key":"value", "key":"value"}]'; keys allowed are @@ -71,57 +96,139 @@ volume_type(String), volume_size(Integer)) + code_deploy: + version_added: 2.8 + type: dict + description: + - code deploy integration configuration + suboptions: + deployment_groups: + description: deployment groups configurations + type: list + suboptions: + application_name: + description: application name + type: str + deployment_group_name: + description: deployment group name + type: str + clean_up_on_failure: + description: clean up on failure + type: bool + terminate_instance_on_failure: + description: terminate instance on failure + type: bool + chef: + type: dict description: - - (Object) The Chef integration configuration.; - Expects the following keys - chef_server (String), + - The Chef integration configuration.; + Expects the following keys- + chef_server (String), organization (String), user (String), pem_key (String), chef_version (String) + + docker_swarm: + type: dict + version_added: 2.8 + description: + - The Docker Swarm integration configuration.; + Expects the following keys- + master_host (String), + master_port (Integer), + auto_scale (Object expects the following keys- + is_enabled (Boolean), + cooldown (Integer), + headroom (Object expects the following keys- + cpu_per_unit (Integer), + memory_per_unit (Integer), + 
num_of_units (Integer)), + key (String), + value (String)), + down (Object expecting the following key - + down_evaluation_periods (Integer))) + draining_timeout: + type: int description: - - (Integer) Time for instance to be drained from incoming requests and deregistered from ELB before termination. + - Time for instance to be drained from incoming requests and deregistered from ELB before termination. ebs_optimized: + type: bool description: - - (Boolean) Enable EBS optimization for supported instances which are not enabled by default.; + - Enable EBS optimization for supported instances which are not enabled by default.; Note - additional charges will be applied. ebs_volume_pool: + type: list description: - - (List of Objects) a list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; + - a list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; '[{"key":"value", "key":"value"}]'; keys allowed are - volume_ids (List of Strings), device_name (String) ecs: + type: dict description: - - (Object) The ECS integration configuration.; - Expects the following key - - cluster_name (String) - + - The ECS integration configuration.; + Expects the following keys - + cluster_name (String), + auto_scale (Object expects the following keys - + is_enabled (Boolean), + is_auto_config (Boolean), + cooldown (Integer), + headroom (Object expects the following keys - + cpu_per_unit (Integer), + memory_per_unit (Integer), + num_of_units (Integer)), + attributes (List of Objects expecting the following keys - + key (String), + value (String)), + down (Object expecting the following key - + down_evaluation_periods (Integer))) elastic_ips: + type: list description: - - (List of Strings) List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate to the group instances + - List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate to the group instances + + elastic_beanstalk: + 
version_added: 2.8 + type: dict + description: + - The ElasticBeanstalk integration configuration.; + Expects the following keys - + environment_id (String) + deployment_preferences (Object expects the following keys - + automatic_roll (Boolean), + batch_size_percentage (Integer), + grace_period (Integer), + strategy (Object expects the following keys- + action (String), + should_drain_instances (Boolean))) fallback_to_od: + type: bool description: - - (Boolean) In case of no spots available, Elastigroup will launch an On-demand instance instead + - In case of no spots available, Elastigroup will launch an On-demand instance instead health_check_grace_period: + type: int description: - - (Integer) The amount of time, in seconds, after the instance has launched to start and check its health. + - The amount of time, in seconds, after the instance has launched to start and check its health. default: 300 health_check_unhealthy_duration_before_replacement: + type: int description: - - (Integer) Minimal mount of time instance should be unhealthy for us to consider it unhealthy. + - Minimal mount of time instance should be unhealthy for us to consider it unhealthy. health_check_type: + type: str choices: - ELB - HCS @@ -129,86 +236,136 @@ - MLB - EC2 description: - - (String) The service to use for the health check. + - The service to use for the health check. iam_role_name: + type: str description: - - (String) The instance profile iamRole name + - The instance profile iamRole name - Only use iam_role_arn, or iam_role_name iam_role_arn: + type: str description: - - (String) The instance profile iamRole arn + - The instance profile iamRole arn - Only use iam_role_arn, or iam_role_name id: + type: str description: - - (String) The group id if it already exists and you want to update, or delete it. + - The group id if it already exists and you want to update, or delete it. This will not work unless the uniqueness_by field is set to id. 
When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created. ignore_changes: + type: list choices: - image_id - target description: - - (List of Strings) list of fields on which changes should be ignored when updating + - list of fields on which changes should be ignored when updating image_id: + type: str description: - - (String) The image Id used to launch the instance.; + - The image Id used to launch the instance.; In case of conflict between Instance type and image type, an error will be returned required: true key_pair: + type: str description: - - (String) Specify a Key Pair to attach to the instances + - Specify a Key Pair to attach to the instances required: true kubernetes: + type: dict description: - - (Object) The Kubernetes integration configuration. + - The Kubernetes integration configuration.; Expects the following keys - api_server (String), - token (String) + token (String), + integration_mode (String), + cluster_identifier (String), + auto_scale (Object expects the following keys - + is_enabled (Boolean), + is_auto_config (Boolean), + cooldown (Integer), + headroom (Object expects the following keys - + cpu_per_unit (Integer), + memory_per_unit (Integer), + num_of_units (Integer)), + labels (List of Objects expecting the following keys - + key (String), + value (String)), + down (Object expecting the following key - + down_evaluation_periods (Integer))) lifetime_period: + type: str description: - - (String) lifetime period + - lifetime period load_balancers: + type: list description: - - (List of Strings) List of classic ELB names + - List of classic ELB names max_size: + type: int description: - - (Integer) The upper limit number of instances that you can scale up to + - The upper limit number of instances that you can scale up to required: true mesosphere: + type: dict description: - (Object) The Mesosphere integration configuration. 
Expects the following key - api_server (String) min_size: + type: int description: - - (Integer) The lower limit number of instances that you can scale down to + - The lower limit number of instances that you can scale down to required: true + mlb_load_balancers: + version_added: 2.8 + type: list + description: + - Objects representing mlb's.; + Expects the following keys- + target_set_id (String) + balancer_id (String) + auto_weight (String) + az_awareness (String) + type (String) MULTAI_TARGET_SET + + mlb_runtime: + version_added: 2.8 + type: dict + description: + - The Spotinst MLB Runtime integration configuration.; + Expects the following keys- + deployment_id (String) The runtime's deployment id + monitoring: + type: bool description: - - (Boolean) Describes whether instance Enhanced Monitoring is enabled + - Describes whether instance Enhanced Monitoring is enabled required: true name: + type: str description: - - (String) Unique name for elastigroup to be created, updated or deleted + - Unique name for elastigroup to be created, updated or deleted required: true network_interfaces: + type: list description: - - (List of Objects) a list of hash/dictionaries of network interfaces to add to the elastigroup; + - a list of hash/dictionaries of network interfaces to add to the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - description (String), @@ -223,32 +380,71 @@ associate_ipv6_address (Boolean), private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean)) + nomad: + version_added: 2.8 + type: dict + description: + - The Nomad integration configuration.; + Expects the following keys- + master_host (String), + master_port (Integer), + acl_token (String), + auto_scale (Object expects the following keys- + is_enabled (Boolean), + cooldown (Integer), + headroom (Object expects the following keys- + cpu_per_unit (Integer), + memory_per_unit (Integer), + num_of_units (Integer)), + constraints (List 
of Objects expecting the following keys- + key (String), + value (String)), + down (Object expecting the following key - + down_evaluation_periods (Integer))) + on_demand_count: + type: int description: - - (Integer) Required if risk is not set + - Required if risk is not set - Number of on demand instances to launch. All other instances will be spot instances.; Either set this parameter or the risk parameter on_demand_instance_type: + type: str description: - - (String) On-demand instance type that will be provisioned + - On-demand instance type that will be provisioned required: true opsworks: + type: dict description: - - (Object) The elastigroup OpsWorks integration configration.; + - The elastigroup OpsWorks integration configration.; Expects the following key - layer_id (String) persistence: + type: dict description: - - (Object) The Stateful elastigroup configration.; - Accepts the following keys - - should_persist_root_device (Boolean), - should_persist_block_devices (Boolean), - should_persist_private_ip (Boolean) + - The Stateful elastigroup configration.; + Expects the following keys - + should_persist_root_device (Boolean), + should_persist_block_devices (Boolean), + should_persist_private_ip (Boolean) + + preferred_spot_instance_types: + version_added: 2.8 + type: list + description: + - The preferred spot instance types.; + + private_ips: + version_added: 2.8 + type: list + description: + - List of Private IPs to associate to the group instances. product: + type: str choices: - Linux/UNIX - SUSE Linux @@ -257,41 +453,64 @@ - SUSE Linux (Amazon VPC) - Windows description: - - (String) Operation system type._ + - Operation system type. 
required: true rancher: + type: dict description: - - (Object) The Rancher integration configuration.; + - The Rancher integration configuration.; Expects the following keys - version (String), access_key (String), secret_key (String), master_host (String) + revert_to_spot: + version_added: 2.8 + type: dict + description: + - Contains parameters for revert to spot + right_scale: + type: dict description: - - (Object) The Rightscale integration configuration.; + - The Rightscale integration configuration.; Expects the following keys - account_id (String), refresh_token (String) risk: + type: int description: - - (Integer) required if on demand is not set. The percentage of Spot instances to launch (0 - 100). + - required if on demand is not set. The percentage of Spot instances to launch (0 - 100). roll_config: + type: dict description: - - (Object) Roll configuration.; + - Roll configuration.; If you would like the group to roll after updating, please use this feature. Accepts the following keys - batch_size_percentage(Integer, Required), grace_period - (Integer, Required), health_check_type(String, Optional) + route53: + version_added: 2.8 + type: dict + description: + - The Route53 integration configuration.; + Expects the following key - + domains (List of Objects expecting the following keys - + hosted_zone_id (String), + record_sets (List of Objects expecting the following keys - + name (String) + use_public_ip (Boolean))) + scheduled_tasks: + type: list description: - - (List of Objects) a list of hash/dictionaries of scheduled tasks to configure in the elastigroup; + - a list of hash/dictionaries of scheduled tasks to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - adjustment (Integer), @@ -307,74 +526,103 @@ is_enabled (Boolean) security_group_ids: + type: list description: - - (List of Strings) One or more security group IDs. ; + - One or more security group IDs. 
; In case of update it will override the existing Security Group with the new given array required: true shutdown_script: + type: str description: - - (String) The Base64-encoded shutdown script that executes prior to instance termination. + - The Base64-encoded shutdown script that executes prior to instance termination. Encode before setting. signals: + type: list description: - - (List of Objects) a list of hash/dictionaries of signals to configure in the elastigroup; + - a list of hash/dictionaries of signals to configure in the elastigroup; keys allowed are - name (String, required), timeout (Integer) spin_up_time: + type: int description: - - (Integer) spin up time, in seconds, for the instance + - spin up time, in seconds, for the instance spot_instance_types: + type: list description: - - (List of Strings) Spot instance type that will be provisioned. + - Spot instance type that will be provisioned. required: true - state: - choices: - - present - - absent + stateful_deallocation_should_delete_network_interfaces: + version_added: 2.8 + type: bool + description: + - Enable deletion of network interfaces on stateful group deletion + + stateful_deallocation_should_delete_snapshots: + version_added: 2.8 + type: bool + description: + - Enable deletion of snapshots on stateful group deletion + + stateful_deallocation_should_delete_images: + version_added: 2.8 + type: bool + description: + - Enable deletion of images on stateful group deletion + + stateful_deallocation_should_delete_volumes: + version_added: 2.8 + type: bool description: - - (String) create or delete the elastigroup + - Enable deletion of volumes on stateful group deletion tags: + type: list description: - - (List of tagKey:tagValue paris) a list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value); + - a list of tags to configure in the elastigroup. 
Please specify list of keys and values (key colon value); target: + type: int description: - - (Integer) The number of instances to launch + - The number of instances to launch required: true target_group_arns: + type: list description: - - (List of Strings) List of target group arns instances should be registered to + - List of target group arns instances should be registered to tenancy: + type: str choices: - default - dedicated description: - - (String) dedicated vs shared tenancy + - dedicated vs shared tenancy terminate_at_end_of_billing_hour: + type: bool description: - - (Boolean) terminate at the end of billing hour + - terminate at the end of billing hour unit: + type: str choices: - instance - weight description: - - (String) The capacity unit to launch instances by. + - The capacity unit to launch instances by. required: true up_scaling_policies: + type: list description: - - (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup; + - a list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - policy_name (String, required), @@ -397,8 +645,9 @@ down_scaling_policies: + type: list description: - - (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup; + - a list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - policy_name (String, required), @@ -420,8 +669,9 @@ minimum (String) target_tracking_policies: + type: list description: - - (List of Objects) a list of hash/dictionaries of target tracking policies to configure in the elastigroup; + - a list of hash/dictionaries of target tracking policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - policy_name (String, required), @@ -434,33 +684,34 @@ target (String, required) uniqueness_by: + type: str choices: - id - name 
description: - - (String) If your group names are not unique, you may use this feature to update or delete a specific group. + - If your group names are not unique, you may use this feature to update or delete a specific group. Whenever this property is set, you must set a group_id in order to update or delete a group, otherwise a group will be created. - user_data: + type: str description: - - (String) Base64-encoded MIME user data. Encode before setting the value. - + - Base64-encoded MIME user data. Encode before setting the value. utilize_reserved_instances: + type: bool description: - - (Boolean) In case of any available Reserved Instances, + - In case of any available Reserved Instances, Elastigroup will utilize your reservations before purchasing Spot instances. - wait_for_instances: + type: bool description: - - (Boolean) Whether or not the elastigroup creation / update actions should wait for the instances to spin - + - Whether or not the elastigroup creation / update actions should wait for the instances to spin wait_timeout: + type: int description: - - (Integer) How long the module should wait for instances before failing the action.; + - How long the module should wait for instances before failing the action.; Only works if wait_for_instances is True. """ @@ -740,7 +991,7 @@ group_id: description: Created / Updated group's ID. 
returned: success - type: string + type: str sample: "sig-12345" ''' @@ -751,6 +1002,7 @@ import os import time from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import env_fallback try: import spotinst_sdk as spotinst @@ -855,6 +1107,15 @@ 'should_persist_block_devices', 'should_persist_private_ip') +revert_to_spot_fields = ('perform_at', + 'time_windows') + +elastic_beanstalk_platform_update_fields = ('perform_at', + 'time_window', + 'update_level') + +elastic_beanstalk_managed_actions_fields = ('platform_update') + strategy_fields = ('risk', 'utilize_reserved_instances', 'fallback_to_od', @@ -862,7 +1123,8 @@ 'availability_vs_cost', 'draining_timeout', 'spin_up_time', - 'lifetime_period') + 'lifetime_period', + 'revert_to_spot') ebs_fields = ('delete_on_termination', 'encrypted', @@ -875,8 +1137,82 @@ 'virtual_name', 'no_device') + kubernetes_fields = ('api_server', - 'token') + 'token', + 'integration_mode', + 'cluster_identifier') + +kubernetes_auto_scale_fields = ('is_enabled', 'is_auto_config', 'cooldown') + +kubernetes_headroom_fields = ( + 'cpu_per_unit', + 'memory_per_unit', + 'num_of_units') + +kubernetes_labels_fields = ('key', 'value') + +kubernetes_down_fields = ('evaluation_periods') + +nomad_fields = ('master_host', 'master_port', 'acl_token') + +nomad_auto_scale_fields = ('is_enabled', 'is_auto_config', 'cooldown') + +nomad_headroom_fields = ('cpu_per_unit', 'memory_per_unit', 'num_of_units') + +nomad_constraints_fields = ('key', 'value') + +nomad_down_fields = ('evaluation_periods') + +docker_swarm_fields = ('master_host', 'master_port') + +docker_swarm_auto_scale_fields = ('is_enabled', 'cooldown') + +docker_swarm_headroom_fields = ( + 'cpu_per_unit', + 'memory_per_unit', + 'num_of_units') + +docker_swarm_down_fields = ('evaluation_periods') + +route53_domain_fields = ('hosted_zone_id',) + +route53_record_set_fields = ('name', 'use_public_ip') + +mlb_runtime_fields = ('deployment_id',) + 
+mlb_load_balancers_fields = ( + 'type', + 'target_set_id', + 'balancer_id', + 'auto_weight', + 'az_awareness') + +elastic_beanstalk_fields = ('environment_id',) + +elastic_beanstalk_deployment_fields = ('automatic_roll', + 'batch_size_percentage', + 'grace_period') + +elastic_beanstalk_strategy_fields = ('action', 'should_drain_instances') + +stateful_deallocation_fields = ( + dict( + ansible_field_name='stateful_deallocation_should_delete_images', + spotinst_field_name='should_delete_images'), + dict( + ansible_field_name='stateful_deallocation_should_delete_snapshots', + spotinst_field_name='should_delete_snapshots'), + dict( + ansible_field_name='stateful_deallocation_should_delete_network_interfaces', + spotinst_field_name='should_delete_network_interfaces'), + dict( + ansible_field_name='stateful_deallocation_should_delete_volumes', + spotinst_field_name='should_delete_volumes')) + +code_deploy_fields = ('clean_up_on_failure', 'terminate_instance_on_failure') + +code_deploy_deployment_fields = ('application_name', 'deployment_group_name') right_scale_fields = ('account_id', 'refresh_token') @@ -904,7 +1240,15 @@ ecs_fields = ('cluster_name',) -multai_fields = ('multai_token',) +ecs_auto_scale_fields = ('is_enabled', 'is_auto_config', 'cooldown') + +ecs_headroom_fields = ('cpu_per_unit', 'memory_per_unit', 'num_of_units') + +ecs_attributes_fields = ('key', 'value') + +ecs_down_fields = ('evaluation_periods') + +multai_fields = ('multai_token') def handle_elastigroup(client, module): @@ -942,9 +1286,10 @@ def handle_elastigroup(client, module): has_changed = False else: eg = expand_elastigroup(module, is_update=True) + auto_apply_tags = module.params.get('auto_apply_tags') if state == 'present': - group = client.update_elastigroup(group_update=eg, group_id=group_id) + group = client.update_elastigroup(group_update=eg, group_id=group_id, auto_apply_tags=auto_apply_tags) message = 'Updated group successfully.' 
try: @@ -964,12 +1309,26 @@ def handle_elastigroup(client, module): elif state == 'absent': try: - client.delete_elastigroup(group_id=group_id) + stfl_dealloc_request = expand_fields( + stateful_deallocation_fields, + module.params, 'StatefulDeallocation') + if stfl_dealloc_request. \ + should_delete_network_interfaces is True or \ + stfl_dealloc_request.should_delete_images is True or \ + stfl_dealloc_request.should_delete_volumes is True or \ + stfl_dealloc_request.should_delete_snapshots is True: + client.delete_elastigroup_with_deallocation( + group_id=group_id, + stateful_deallocation=stfl_dealloc_request) + else: + client.delete_elastigroup(group_id=group_id) except SpotinstClientException as exc: if "GROUP_DOESNT_EXIST" in exc.message: pass else: - module.fail_json(msg="Error while attempting to delete group : " + exc.message) + module.fail_json( + msg="Error while attempting to delete group :" + " " + exc.message) message = 'Deleted group successfully.' has_changed = True @@ -1031,7 +1390,7 @@ def find_group_with_same_name(groups, name): def expand_elastigroup(module, is_update): - do_not_update = module.params['do_not_update'] + do_not_update = module.params.get('do_not_update') or [] name = module.params.get('name') eg = spotinst.aws_elastigroup.Elastigroup() @@ -1061,12 +1420,15 @@ def expand_elastigroup(module, is_update): def expand_compute(eg, module, is_update, do_not_update): - elastic_ips = module.params['elastic_ips'] + elastic_ips = module.params.get('elastic_ips') on_demand_instance_type = module.params.get('on_demand_instance_type') - spot_instance_types = module.params['spot_instance_types'] - ebs_volume_pool = module.params['ebs_volume_pool'] - availability_zones_list = module.params['availability_zones'] + spot_instance_types = module.params.get('spot_instance_types') + ebs_volume_pool = module.params.get('ebs_volume_pool') + availability_zones_list = module.params.get('availability_zones') + private_ips = module.params.get('private_ips') 
product = module.params.get('product') + preferred_spot_instance_types = module.params.get( + 'preferred_spot_instance_types') eg_compute = spotinst.aws_elastigroup.Compute() @@ -1078,6 +1440,12 @@ def expand_compute(eg, module, is_update, do_not_update): if elastic_ips is not None: eg_compute.elastic_ips = elastic_ips + if private_ips: + eg_compute.private_ips = private_ips + + if preferred_spot_instance_types: + eg_instance_types.preferred_spot = preferred_spot_instance_types + if on_demand_instance_type or spot_instance_types is not None: eg_instance_types = spotinst.aws_elastigroup.InstanceTypes() @@ -1120,14 +1488,16 @@ def expand_ebs_volume_pool(eg_compute, ebs_volumes_list): def expand_launch_spec(eg_compute, module, is_update, do_not_update): eg_launch_spec = expand_fields(lspec_fields, module.params, 'LaunchSpecification') - if module.params['iam_role_arn'] is not None or module.params['iam_role_name'] is not None: + if module.params.get('iam_role_arn') is not None or module.params.get('iam_role_name') is not None: eg_launch_spec.iam_role = expand_fields(iam_fields, module.params, 'IamRole') - tags = module.params['tags'] - load_balancers = module.params['load_balancers'] - target_group_arns = module.params['target_group_arns'] - block_device_mappings = module.params['block_device_mappings'] - network_interfaces = module.params['network_interfaces'] + tags = module.params.get('tags') + load_balancers = module.params.get('load_balancers') + mlb_load_balancers = module.params.get('mlb_load_balancers') + target_group_arns = module.params.get('target_group_arns') + block_device_mappings = module.params.get('block_device_mappings') + network_interfaces = module.params.get('network_interfaces') + credit_specification = module.params.get('credit_specification') if is_update is True: if 'image_id' in do_not_update: @@ -1135,23 +1505,44 @@ def expand_launch_spec(eg_compute, module, is_update, do_not_update): expand_tags(eg_launch_spec, tags) - 
expand_load_balancers(eg_launch_spec, load_balancers, target_group_arns) + expand_load_balancers(eg_launch_spec, load_balancers, target_group_arns, mlb_load_balancers) expand_block_device_mappings(eg_launch_spec, block_device_mappings) expand_network_interfaces(eg_launch_spec, network_interfaces) + expand_credit_specification(eg_launch_spec, credit_specification) + eg_compute.launch_specification = eg_launch_spec +def expand_credit_specification(eg_launch_spec, credit_specification): + eg_credit_specification = None + + if credit_specification is not None: + eg_credit_specification = spotinst.aws_elastigroup.CreditSpecification() + cpu_credits = credit_specification.get('cpu_credits') + + if cpu_credits is not None: + eg_credit_specification.cpu_credits = cpu_credits + + eg_launch_spec.credit_specification = eg_credit_specification + + def expand_integrations(eg, module): rancher = module.params.get('rancher') mesosphere = module.params.get('mesosphere') ecs = module.params.get('ecs') kubernetes = module.params.get('kubernetes') + nomad = module.params.get('nomad') + docker_swarm = module.params.get('docker_swarm') + route53 = module.params.get('route53') right_scale = module.params.get('right_scale') opsworks = module.params.get('opsworks') chef = module.params.get('chef') + mlb_runtime = module.params.get('mlb_runtime') + elastic_beanstalk = module.params.get('elastic_beanstalk') + code_deploy = module.params.get('code_deploy') integration_exists = False @@ -1162,11 +1553,36 @@ def expand_integrations(eg, module): integration_exists = True if ecs is not None: - eg_integrations.ecs = expand_fields(ecs_fields, ecs, 'EcsConfiguration') + expand_ecs(eg_integrations, ecs) integration_exists = True if kubernetes is not None: - eg_integrations.kubernetes = expand_fields(kubernetes_fields, kubernetes, 'KubernetesConfiguration') + expand_kubernetes(eg_integrations, kubernetes) + integration_exists = True + + if nomad is not None: + expand_nomad(eg_integrations, nomad) + 
integration_exists = True + + if docker_swarm is not None: + expand_docker_swarm(eg_integrations, docker_swarm) + integration_exists = True + + if route53 is not None: + expand_route53(eg_integrations, route53) + integration_exists = True + + if mlb_runtime is not None: + eg_integrations.mlb_runtime = expand_fields( + mlb_runtime_fields, mlb_runtime, 'MlbRuntimeConfiguration') + integration_exists = True + + if elastic_beanstalk: + expand_elastic_beanstalk(eg_integrations, elastic_beanstalk) + integration_exists = True + + if code_deploy is not None: + expand_code_deploy(eg_integrations, code_deploy) integration_exists = True if right_scale is not None: @@ -1189,6 +1605,215 @@ def expand_integrations(eg, module): eg.third_parties_integration = eg_integrations +def expand_ecs(eg_integrations, ecs_config): + ecs = expand_fields(ecs_fields, ecs_config, 'EcsConfiguration') + ecs_auto_scale_config = ecs_config.get('auto_scale', None) + + if ecs_auto_scale_config: + ecs.auto_scale = expand_fields( + ecs_auto_scale_fields, + ecs_auto_scale_config, + 'EcsAutoScaleConfiguration') + + ecs_headroom_config = ecs_auto_scale_config.get('headroom', None) + if ecs_headroom_config: + ecs.auto_scale.headroom = expand_fields( + ecs_headroom_fields, + ecs_headroom_config, + 'EcsAutoScalerHeadroomConfiguration') + + ecs_attributes_config = ecs_auto_scale_config.get('attributes', None) + if ecs_attributes_config: + ecs.auto_scale.attributes = expand_list( + ecs_attributes_config, + ecs_attributes_fields, + 'EcsAutoScalerAttributeConfiguration') + + ecs_down_config = ecs_auto_scale_config.get('down', None) + if ecs_down_config: + ecs.auto_scale.down = expand_fields( + ecs_down_fields, ecs_down_config, + 'EcsAutoScalerDownConfiguration') + + eg_integrations.ecs = ecs + + +def expand_nomad(eg_integrations, nomad_config): + nomad = expand_fields(nomad_fields, nomad_config, 'NomadConfiguration') + nomad_auto_scale_config = nomad_config.get('auto_scale', None) + + if nomad_auto_scale_config: 
+ nomad.auto_scale = expand_fields( + nomad_auto_scale_fields, + nomad_auto_scale_config, + 'NomadAutoScalerConfiguration') + + nomad_headroom_config = nomad_auto_scale_config.get('headroom', None) + if nomad_headroom_config: + nomad.auto_scale.headroom = expand_fields( + nomad_headroom_fields, + nomad_headroom_config, + 'NomadAutoScalerHeadroomConfiguration') + + nomad_constraints_config = nomad_auto_scale_config.get( + 'constraints', None) + if nomad_constraints_config: + nomad.auto_scale.constraints = expand_list( + nomad_constraints_config, + nomad_constraints_fields, + 'NomadAutoScalerConstraintsConfiguration') + + nomad_down_config = nomad_auto_scale_config.get('down', None) + if nomad_down_config: + nomad.auto_scale.down = expand_fields( + nomad_down_fields, + nomad_down_config, + 'NomadAutoScalerDownConfiguration') + + eg_integrations.nomad = nomad + + +def expand_code_deploy(eg_integrations, code_deploy_config): + code_deploy = expand_fields( + code_deploy_fields, code_deploy_config, 'CodeDeployConfiguration') + + code_deploy_deployment_config = code_deploy_config.get( + 'deployment_groups', None) + + if code_deploy_deployment_config: + code_deploy.deployment_groups = expand_list( + code_deploy_deployment_config, code_deploy_deployment_fields, + 'CodeDeployDeploymentGroupsConfiguration') + + eg_integrations.code_deploy = code_deploy + + +def expand_docker_swarm(eg_integrations, docker_swarm_config): + docker_swarm = expand_fields( + docker_swarm_fields, + docker_swarm_config, + 'DockerSwarmConfiguration') + docker_swarm_auto_scale_config = docker_swarm_config.get( + 'auto_scale', None) + + if docker_swarm_auto_scale_config: + docker_swarm.auto_scale = expand_fields( + docker_swarm_auto_scale_fields, + docker_swarm_auto_scale_config, + 'DockerSwarmAutoScalerConfiguration') + + docker_swarm_headroom_config = docker_swarm_auto_scale_config.get( + 'headroom', None) + if docker_swarm_headroom_config: + docker_swarm.auto_scale.headroom = expand_fields( + 
docker_swarm_headroom_fields, + docker_swarm_headroom_config, + 'DockerSwarmAutoScalerHeadroomConfiguration') + + docker_swarm_down_config = docker_swarm_auto_scale_config.get( + 'down', None) + if docker_swarm_down_config: + docker_swarm.auto_scale.down = expand_fields( + docker_swarm_down_fields, + docker_swarm_down_config, + 'DockerSwarmAutoScalerDownConfiguration') + + eg_integrations.docker_swarm = docker_swarm + + +def expand_route53(eg_integrations, route53_config): + route53 = spotinst.aws_elastigroup.Route53Configuration() + domains_configuration = route53_config.get('domains', None) + + if domains_configuration: + route53.domains = expand_list( + domains_configuration, + route53_domain_fields, + 'Route53DomainsConfiguration') + + for i in range(len(route53.domains)): + expanded_domain = route53.domains[i] + raw_domain = domains_configuration[i] + expanded_domain.record_sets = expand_list( + raw_domain['record_sets'], + route53_record_set_fields, + 'Route53RecordSetsConfiguration') + + eg_integrations.route53 = route53 + + +def expand_elastic_beanstalk(eg_integrations, elastic_beanstalk_config): + elastic_beanstalk = expand_fields( + elastic_beanstalk_fields, elastic_beanstalk_config, 'ElasticBeanstalk') + + elastic_beanstalk_deployment = elastic_beanstalk_config.get( + 'deployment_preferences', None) + + elastic_beanstalk_managed_actions = elastic_beanstalk_config.get( + 'managed_actions', None) + + if elastic_beanstalk_deployment: + elastic_beanstalk.deployment_preferences = expand_fields( + elastic_beanstalk_deployment_fields, elastic_beanstalk_deployment, + 'DeploymentPreferences') + if elastic_beanstalk.deployment_preferences and elastic_beanstalk_deployment.get('strategy'): + elastic_beanstalk.deployment_preferences.strategy = \ + expand_fields(elastic_beanstalk_strategy_fields, + elastic_beanstalk_deployment['strategy'], + 'BeanstalkDeploymentStrategy') + + if elastic_beanstalk_managed_actions: + elastic_beanstalk.managed_actions = expand_fields( + 
elastic_beanstalk_managed_actions_fields, elastic_beanstalk_managed_actions, + 'ManagedActions') + + if elastic_beanstalk.managed_actions: + elastic_beanstalk.managed_actions.platform_update = expand_fields( + elastic_beanstalk_platform_update_fields, elastic_beanstalk_managed_actions['platform_update'], + 'PlatformUpdate') + + eg_integrations.elastic_beanstalk = elastic_beanstalk + + +def expand_kubernetes(eg_integrations, kubernetes_config): + kubernetes = expand_fields( + kubernetes_fields, + kubernetes_config, + 'KubernetesConfiguration') + kubernetes_auto_scale_config = kubernetes_config.get('auto_scale', None) + + if kubernetes_auto_scale_config: + kubernetes.auto_scale = expand_fields( + kubernetes_auto_scale_fields, + kubernetes_auto_scale_config, + 'KubernetesAutoScalerConfiguration') + + kubernetes_headroom_config = kubernetes_auto_scale_config.get( + 'auto_scale', None) + if kubernetes_headroom_config: + kubernetes.auto_scale.headroom = expand_fields( + kubernetes_headroom_fields, + kubernetes_headroom_config, + 'KubernetesAutoScalerHeadroomConfiguration') + + kubernetes_labels_config = kubernetes_auto_scale_config.get( + 'labels', None) + if kubernetes_labels_config: + kubernetes.auto_scale.labels = expand_list( + kubernetes_labels_config, + kubernetes_labels_fields, + 'KubernetesAutoScalerLabelsConfiguration') + + kubernetes_down_config = kubernetes_auto_scale_config.get('down', None) + if kubernetes_down_config: + kubernetes.auto_scale.down = expand_fields( + kubernetes_down_fields, + kubernetes_down_config, + 'KubernetesAutoScalerDownConfiguration') + + eg_integrations.kubernetes = kubernetes + + def expand_capacity(eg, module, is_update, do_not_update): eg_capacity = expand_fields(capacity_fields, module.params, 'Capacity') @@ -1204,14 +1829,14 @@ def expand_capacity(eg, module, is_update, do_not_update): def expand_strategy(eg, module): persistence = module.params.get('persistence') signals = module.params.get('signals') + revert_to_spot = 
module.params.get('revert_to_spot') eg_strategy = expand_fields(strategy_fields, module.params, 'Strategy') terminate_at_end_of_billing_hour = module.params.get('terminate_at_end_of_billing_hour') if terminate_at_end_of_billing_hour is not None: - eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields, - module.params, 'ScalingStrategy') + eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields, module.params, 'ScalingStrategy') if persistence is not None: eg_strategy.persistence = expand_fields(persistence_fields, persistence, 'Persistence') @@ -1222,6 +1847,9 @@ def expand_strategy(eg, module): if len(eg_signals) > 0: eg_strategy.signals = eg_signals + if revert_to_spot is not None: + eg_strategy.revert_to_spot = expand_fields(revert_to_spot_fields, revert_to_spot, "RevertToSpot") + eg.strategy = eg_strategy @@ -1251,7 +1879,7 @@ def expand_scheduled_tasks(eg, module): eg.scheduling = eg_scheduling -def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns): +def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns, mlb_load_balancers): if load_balancers is not None or target_group_arns is not None: eg_load_balancers_config = spotinst.aws_elastigroup.LoadBalancersConfig() eg_total_lbs = [] @@ -1272,6 +1900,17 @@ def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns): eg_elb.type = 'TARGET_GROUP' eg_total_lbs.append(eg_elb) + if mlb_load_balancers: + mlbs = expand_list( + mlb_load_balancers, + mlb_load_balancers_fields, + 'LoadBalancer') + + for mlb in mlbs: + mlb.type = "MULTAI_TARGET_SET" + + eg_total_lbs.extend(mlbs) + if len(eg_total_lbs) > 0: eg_load_balancers_config.load_balancers = eg_total_lbs eg_launchspec.load_balancers_config = eg_load_balancers_config @@ -1283,10 +1922,11 @@ def expand_tags(eg_launchspec, tags): for tag in tags: eg_tag = spotinst.aws_elastigroup.Tag() - if tag.keys(): - eg_tag.tag_key = tag.keys()[0] - if tag.values(): - eg_tag.tag_value = 
tag.values()[0] + + if list(tag): + eg_tag.tag_key = list(tag)[0] + if tag[list(tag)[0]]: + eg_tag.tag_value = tag[list(tag)[0]] eg_tags.append(eg_tag) @@ -1329,9 +1969,9 @@ def expand_network_interfaces(eg_launchspec, enis): def expand_scaling(eg, module): - up_scaling_policies = module.params['up_scaling_policies'] - down_scaling_policies = module.params['down_scaling_policies'] - target_tracking_policies = module.params['target_tracking_policies'] + up_scaling_policies = module.params.get('up_scaling_policies') + down_scaling_policies = module.params.get('down_scaling_policies') + target_tracking_policies = module.params.get('target_tracking_policies') eg_scaling = spotinst.aws_elastigroup.Scaling() @@ -1404,15 +2044,53 @@ def expand_target_tracking_policies(tracking_policies): return eg_tracking_policies +def get_client(module): + # Retrieve creds file variables + creds_file_loaded_vars = dict() + + credentials_path = module.params.get('credentials_path') + + if credentials_path is not None: + try: + with open(credentials_path, "r") as creds: + for line in creds: + eq_index = line.find('=') + var_name = line[:eq_index].strip() + string_value = line[eq_index + 1:].strip() + creds_file_loaded_vars[var_name] = string_value + except IOError: + pass + # End of creds file retrieval + + token = module.params.get('token') + if not token: + token = creds_file_loaded_vars.get("token") + + account = module.params.get('account_id') + if not account: + account = creds_file_loaded_vars.get("account") + + client = spotinst.SpotinstClient(auth_token=token, print_output=False) + + if account is not None: + client = spotinst.SpotinstClient(auth_token=token, account_id=account, print_output=False) + + return client + + def main(): fields = dict( - account_id=dict(type='str'), + account_id=dict(type='str', fallback=(env_fallback, ['SPOTINST_ACCOUNT_ID', 'ACCOUNT'])), + auto_apply_tags=dict(type='bool'), availability_vs_cost=dict(type='str', required=True), 
availability_zones=dict(type='list', required=True), block_device_mappings=dict(type='list'), chef=dict(type='dict'), + code_deploy=dict(type='dict'), credentials_path=dict(type='path', default="~/.spotinst/credentials"), + credit_specification=dict(type='dict'), do_not_update=dict(default=[], type='list'), + docker_swarm=dict(type='dict'), down_scaling_policies=dict(type='list'), draining_timeout=dict(type='int'), ebs_optimized=dict(type='bool'), @@ -1435,20 +2113,27 @@ def main(): max_size=dict(type='int', required=True), mesosphere=dict(type='dict'), min_size=dict(type='int', required=True), + mlb_runtime=dict(type='dict'), + mlb_load_balancers=dict(type='list'), monitoring=dict(type='str'), multai_load_balancers=dict(type='list'), multai_token=dict(type='str'), name=dict(type='str', required=True), network_interfaces=dict(type='list'), + nomad=dict(type='dict'), on_demand_count=dict(type='int'), on_demand_instance_type=dict(type='str'), opsworks=dict(type='dict'), persistence=dict(type='dict'), + preferred_spot_instance_types=dict(type='list'), + private_ips=dict(type='list'), product=dict(type='str', required=True), rancher=dict(type='dict'), + revert_to_spot=dict(type='dict'), right_scale=dict(type='dict'), risk=dict(type='int'), roll_config=dict(type='dict'), + route53=dict(type='dict'), scheduled_tasks=dict(type='list'), security_group_ids=dict(type='list', required=True), shutdown_script=dict(type='str'), @@ -1456,12 +2141,16 @@ def main(): spin_up_time=dict(type='int'), spot_instance_types=dict(type='list', required=True), state=dict(default='present', choices=['present', 'absent']), + stateful_deallocation_should_delete_images=dict(type='bool'), + stateful_deallocation_should_delete_network_interfaces=dict(type='bool'), + stateful_deallocation_should_delete_snapshots=dict(type='bool'), + stateful_deallocation_should_delete_volumes=dict(type='bool'), tags=dict(type='list'), target=dict(type='int', required=True), target_group_arns=dict(type='list'), 
tenancy=dict(type='str'), terminate_at_end_of_billing_hour=dict(type='bool'), - token=dict(type='str'), + token=dict(type='str', fallback=(env_fallback, ['SPOTINST_TOKEN'])), unit=dict(type='str'), user_data=dict(type='str'), utilize_reserved_instances=dict(type='bool'), @@ -1477,38 +2166,7 @@ def main(): if not HAS_SPOTINST_SDK: module.fail_json(msg="the Spotinst SDK library is required. (pip install spotinst_sdk)") - # Retrieve creds file variables - creds_file_loaded_vars = dict() - - credentials_path = module.params.get('credentials_path') - - try: - with open(credentials_path, "r") as creds: - for line in creds: - eq_index = line.find('=') - var_name = line[:eq_index].strip() - string_value = line[eq_index + 1:].strip() - creds_file_loaded_vars[var_name] = string_value - except IOError: - pass - # End of creds file retrieval - - token = module.params.get('token') - if not token: - token = os.environ.get('SPOTINST_TOKEN') - if not token: - token = creds_file_loaded_vars.get("token") - - account = module.params.get('account_id') - if not account: - account = os.environ.get('SPOTINST_ACCOUNT_ID') or os.environ.get('ACCOUNT') - if not account: - account = creds_file_loaded_vars.get("account") - - client = spotinst.SpotinstClient(auth_token=token, print_output=False) - - if account is not None: - client = spotinst.SpotinstClient(auth_token=token, print_output=False, account_id=account) + client = get_client(module=module) group_id, message, has_changed = handle_elastigroup(client=client, module=module) diff --git a/spotinst/spotinst_event_subscription.py b/spotinst/spotinst_event_subscription.py new file mode 100644 index 0000000..ceb843b --- /dev/null +++ b/spotinst/spotinst_event_subscription.py @@ -0,0 +1,287 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) + +ANSIBLE_METADATA = 
{'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+DOCUMENTATION = """
+---
+module: spotinst_event_subscription
+version_added: 2.8
+short_description: Create, update or delete Spotinst Event Subscription
+author: Spotinst (@jeffnoehren)
+description:
+  - Can create, update, or delete Spotinst Event Subscription
+    You will have to have a credentials file in this location - /.spotinst/credentials
+    The credentials file must contain a row that looks like this
+    token = 
+    Full documentation available at U(https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-)
+requirements:
+  - python >= 2.7
+  - spotinst_sdk >= 1.0.44
+options:
+
+  id:
+    description:
+      - Parameters used for Updating or Deleting subscription.
+    type: str
+
+  credentials_path:
+    default: "~/.spotinst/credentials"
+    description:
+      - Optional parameter that allows to set a non-default credentials path.
+    type: str
+
+  account_id:
+    description:
+      - Optional parameter that allows to set an account-id inside the module configuration. By default this is retrieved from the credentials path
+    type: str
+
+  token:
+    description:
+      - Optional parameter that allows to set a token inside the module configuration. 
By default this is retrieved from the credentials path + type: str + + state: + type: str + choices: + - present + - absent + default: present + description: + - create update or delete + + resource_id: + description: + - Resource that the subscription will be on + type: str + + protocol: + description: + - (String) Type of desired protocol + + endpoint: + description: + - Endpoint for Subscription to hit + type: str + + event_type: + description: + - Type of desired event + type: str + + event_format: + description: + - Event body to be sent to endpoint + type: str +""" +EXAMPLES = """ +#In this basic example, we create an event subscription + +- hosts: localhost + tasks: + - name: create ocean + spotinst_event_subscription: + account_id: + token: + state: present + id: sis-e62dfd0f + resource_id: sig-992a78db + protocol: web + endpoint: https://webhook.com + event_type: GROUP_UPDATED + event_format: { "subject" : "%s", "message" : "%s" } + register: result + - debug: var=result +""" +RETURN = """ +--- +result: + type: str + sample: sis-e62dfd0f + returned: success + description: Created Subscription successfully +""" + +HAS_SPOTINST_SDK = False +__metaclass__ = type + +import os +import time +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import env_fallback + +try: + import spotinst_sdk as spotinst + from spotinst_sdk import SpotinstClientException + + HAS_SPOTINST_SDK = True + +except ImportError: + pass + + +# region Request Builder Funcitons +def expand_subscription_request(module): + event_subscription = spotinst.spotinst_event_subscription.Subscription() + + resource_id = module.params.get('resource_id') + protocol = module.params.get('protocol') + endpoint = module.params.get('endpoint') + event_type = module.params.get('event_type') + event_format = module.params.get('event_format') + + if resource_id is not None: + event_subscription.resource_id = resource_id + + if protocol is not None: + 
event_subscription.protocol = protocol + + if endpoint is not None: + event_subscription.endpoint = endpoint + + if event_type is not None: + event_subscription.event_type = event_type + + if event_format is not None: + event_subscription.event_format = event_format + + return event_subscription +# endregion + + +# region Util Functions +def handle_subscription(client, module): + subscription_id = None + message = None + has_changed = False + + request_type, subscription_id = get_request_type_and_id(client=client, module=module) + + if request_type == "create": + subscription_id, message, has_changed = handle_create(client=client, module=module) + elif request_type == "update": + subscription_id, message, has_changed = handle_update(client=client, module=module, subscription_id=subscription_id) + elif request_type == "delete": + subscription_id, message, has_changed = handle_delete(client=client, module=module, subscription_id=subscription_id) + else: + module.fail_json(msg="Action Not Allowed") + + return subscription_id, message, has_changed + + +def get_request_type_and_id(client, module): + request_type = None + subscription_id = module.params.get('id') + state = module.params.get('state') + + if state == 'present': + if subscription_id is None: + request_type = "create" + + else: + request_type = "update" + + elif state == 'absent': + request_type = "delete" + + return request_type, subscription_id + + +def get_client(module): + # Retrieve creds file variables + creds_file_loaded_vars = dict() + + credentials_path = module.params.get('credentials_path') + + if credentials_path is not None: + try: + with open(credentials_path, "r") as creds: + for line in creds: + eq_index = line.find('=') + var_name = line[:eq_index].strip() + string_value = line[eq_index + 1:].strip() + creds_file_loaded_vars[var_name] = string_value + except IOError: + pass + # End of creds file retrieval + + token = module.params.get('token') + if not token: + token = 
creds_file_loaded_vars.get("token") + + account = module.params.get('account_id') + if not account: + account = creds_file_loaded_vars.get("account") + + client = spotinst.SpotinstClient(auth_token=token, print_output=False) + + if account is not None: + client = spotinst.SpotinstClient(auth_token=token, account_id=account, print_output=False) + + return client +# endregion + + +# region Request Functions +def handle_create(client, module): + subscription_request = expand_subscription_request(module=module) + subscription = client.create_event_subscription(subscription=subscription_request) + + subscription_id = subscription['id'] + message = 'Created subscription successfully' + has_changed = True + + return subscription_id, message, has_changed + + +def handle_update(client, module, subscription_id): + subscription_request = expand_subscription_request(module=module) + client.update_event_subscription(subscription_id=subscription_id, subscription=subscription_request) + + message = 'Updated subscription successfully' + has_changed = True + + return subscription_id, message, has_changed + + +def handle_delete(client, module, subscription_id): + client.delete_event_subscription(subscription_id=subscription_id) + + message = 'Deleted subscription successfully' + has_changed = True + + return subscription_id, message, has_changed +# endregion + + +def main(): + fields = dict( + account_id=dict(type='str', fallback=(env_fallback, ['SPOTINST_ACCOUNT_ID', 'ACCOUNT'])), + token=dict(type='str', fallback=(env_fallback, ['SPOTINST_TOKEN'])), + state=dict(type='str', default='present', choices=['present', 'absent']), + id=dict(type='str'), + credentials_path=dict(type='path', default="~/.spotinst/credentials"), + + resource_id=dict(type='str'), + protocol=dict(type='str'), + endpoint=dict(type='str'), + event_type=dict(type='str'), + event_format=dict(type='dict')) + + module = AnsibleModule(argument_spec=fields) + + if not HAS_SPOTINST_SDK: + module.fail_json(msg="the 
Spotinst SDK library is required. (pip install spotinst_sdk)") + + client = get_client(module=module) + + subscription_id, message, has_changed = handle_subscription(client=client, module=module) + + module.exit_json(changed=has_changed, subscription_id=subscription_id, message=message) + + +if __name__ == '__main__': + main() diff --git a/spotinst/spotinst_mrscaler.py b/spotinst/spotinst_mrscaler.py new file mode 100644 index 0000000..5a24dfd --- /dev/null +++ b/spotinst/spotinst_mrscaler.py @@ -0,0 +1,985 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} +DOCUMENTATION = """ +--- +module: spotinst_mrscaler +version_added: 2.8 +short_description: Create, update or delete Spotinst MrScaler +author: Spotinst (@jeffnoehren) +description: + - Can create, update, or delete Spotinst MrScaler + You will have to have a credentials file in this location - /.spotinst/credentials + The credentials file must contain a row that looks like this + token = + Full documentation available at U(https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-) +requirements: + - python >= 2.7 + - spotinst_sdk >= 1.0.44 +options: + + id: + description: + - (String) The group id if it already exists and you want to update, or delete it. + This will not work unless the uniqueness_by field is set to id. + When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created. + + token: + type: str + description: + - Spotinst API Token + + credentials_path: + type: str + default: /root/.spotinst/credentials + description: + - Optional parameter that allows to set a non-default credentials path. 
+ required: false + + account_id: + type: str + description: + - Optional parameter that allows to set an account-id inside the module configuration. By default this is retrieved from the credentials path + required: false + + state: + type: str + choices: + - present + - absent + default: present + description: + - create update or delete + + uniqueness_by: + type: str + choices: + - id + - name + default: name + description: + - If set to id an id must be provided, if name no id is needed + required: false + + name: + type: str + description: + - Name for EMR cluster + required: true + + description: + type: str + description: + - Description of EMR cluster + required: false + + region: + type: str + description: + - Region to deploy EMR cluster instance Groups + required: true + + strategy: + type: dict + description: + - Choose to create new cluster, clone an existing cluster or wrap an existing cluster + + scheduling: + type: dict + description: + - List of Scheduled tasks to perform + + scaling: + type: dict + description: + - Lists of up and down scaling policies + + compute: + type: dict + description: + - Schema that contains instance groups and other important resource parameters + + cluster: + type: dict + description: + - Schema that contains cluster parameters + +""" +EXAMPLES = """ +#Create an EMR Cluster + +- hosts: localhost + tasks: + - name: create emr + spotinst_mrScaler: + account_id: YOUR_ACCOUNT_ID + token: YOUR_SPOTINST_TOKEN + state: present + name: ansible_test_group + description: this is from ansible + region: us-west-2 + strategy: + new: + release_label: emr-5.17.0 + provisioning_timeout: + timeout: 15 + timeout_action: terminate + compute: + availability_zones: + - name: us-west-2b + subnet_id: + instance_groups: + master_group: + instance_types: + - m3.xlarge + target: 1 + life_cycle: ON_DEMAND + core_group: + instance_types: + - m3.xlarge + target: 1 + life_cycle: SPOT + task_group: + instance_types: + - m3.xlarge + capacity: + 
minimum: 0 + maximum: 0 + target: 0 + life_cycle: SPOT + emr_managed_master_security_group: sg-1234567 + emr_managed_slave_security_group: sg-1234567 + additional_master_security_groups: sg-1234567 + - sg-1234567 + additional_slave_security_groups: + - sg-1234567 + ec2_key_name: Noam-key + applications: + - name: Ganglia + version: "1.0" + - name: Hadoop + cluster: + visible_to_all_users: true + termination_protected: false + keep_job_flow_alive_when_no_steps: true + log_uri: s3://job-status + additional_info: "{'test':'more information'}" + job_flow_role: EMR_EC2_DefaultRole + security_configuration: test + register: result + - debug: var=result +""" +RETURN = """ +--- +result: + type: str + returned: success + sample: simrs-35124875 + description: Created EMR Cluster successfully. +""" +HAS_SPOTINST_SDK = False +__metaclass__ = type + +import os +import time +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import env_fallback + +try: + import spotinst_sdk as spotinst + from spotinst_sdk import SpotinstClientException + + HAS_SPOTINST_SDK = True + +except ImportError: + pass + + +# region Request Builder Funcitons +def expand_emr_request(module, is_update): + do_not_update = module.params.get('do_not_update') or [] + + name = module.params.get('name') + description = module.params.get('description') + region = module.params.get('region') + + strategy = module.params.get('strategy') + scheduling = module.params.get('scheduling') + scaling = module.params.get('scaling') + compute = module.params.get('compute') + cluster = module.params.get('cluster') + + emr = spotinst.spotinst_emr.EMR() + + if name is not None: + emr.name = name + if description is not None: + emr.description = description + if region is not None and not is_update: + emr.region = region + + if not is_update: + # Strategy + if strategy is not None: + expand_strategy(emr=emr, strategy=strategy) + # Scheduling + if scheduling is not None: + 
expand_scheduling(emr=emr, scheduling=scheduling) + # Scaling + if scaling is not None: + expand_scaling(emr=emr, scaling=scaling) + + # Compute + if compute is not None: + expand_compute(emr=emr, compute=compute, is_update=is_update, do_not_update=do_not_update) + # Cluster + if cluster is not None: + expand_cluster(emr=emr, cluster=cluster, is_update=is_update, do_not_update=do_not_update) + + return emr + + +# region Strategy +def expand_strategy(emr, strategy): + emr_strategy = spotinst.spotinst_emr.Strategy() + + wrap = strategy.get('wrap') + clone = strategy.get('clone') + new = strategy.get('new') + provisioning_timeout = strategy.get('provisioning_timeout') + + if wrap is not None: + expand_wrap(emr_strategy=emr_strategy, wrap=wrap) + if clone is not None: + expand_clone(emr_strategy=emr_strategy, clone=clone) + if new is not None: + expand_new(emr_strategy=emr_strategy, new=new) + if provisioning_timeout is not None: + expand_provisioning_timeout(emr_strategy=emr_strategy, provisioning_timeout=provisioning_timeout) + + emr.strategy = emr_strategy + + +def expand_wrap(emr_strategy, wrap): + emr_wrapping = spotinst.spotinst_emr.Wrapping() + source_cluster_id = wrap.get('source_cluster_id') + + if source_cluster_id is not None: + emr_wrapping.source_cluster_id = source_cluster_id + + emr_strategy.wrapping = emr_wrapping + + +def expand_clone(emr_strategy, clone): + emr_cloning = spotinst.spotinst_emr.Cloning() + + origin_cluster_id = clone.get('origin_cluster_id') + include_steps = clone.get('include_steps') + number_of_retries = clone.get('number_of_retries') + + if origin_cluster_id is not None: + emr_cloning.origin_cluster_id = origin_cluster_id + if include_steps is not None: + emr_cloning.include_steps = include_steps + if number_of_retries is not None: + emr_cloning.number_of_retries = number_of_retries + + emr_strategy.cloning = emr_cloning + + +def expand_new(emr_strategy, new): + emr_new = spotinst.spotinst_emr.New() + + release_label = 
new.get('release_label') + number_of_retries = new.get('number_of_retries') + + if release_label is not None: + emr_new.release_label = release_label + if number_of_retries is not None: + emr_new.number_of_retries = number_of_retries + + emr_strategy.new = emr_new + + +def expand_provisioning_timeout(emr_strategy, provisioning_timeout): + emr_provisioning_timeout = spotinst.spotinst_emr.ProvisioningTimeout() + + timeout = provisioning_timeout.get('timeout') + timeout_action = provisioning_timeout.get('timeout_action') + + if timeout is not None: + emr_provisioning_timeout.timeout = timeout + if timeout_action is not None: + emr_provisioning_timeout.timeout_action = timeout_action + + emr_strategy.provisioning_timeout = emr_provisioning_timeout +# endregion + + +# region Compute +def expand_compute(emr, compute, is_update, do_not_update): + emr_compute = spotinst.spotinst_emr.Compute() + + ebs_root_volume_size = compute.get('ebs_root_volume_size') + availability_zones = compute.get('availability_zones') + bootstrap_actions = compute.get('bootstrap_actions') + steps = compute.get('steps') + instance_groups = compute.get('instance_groups') + configurations = compute.get('configurations') + emr_managed_master_security_group = compute.get('emr_managed_master_security_group') + emr_managed_slave_security_group = compute.get('emr_managed_slave_security_group') + additional_master_security_groups = compute.get('additional_master_security_groups') + service_access_security_group = compute.get('service_access_security_group') + custom_ami_id = compute.get('custom_ami_id') + repo_upgrade_on_boot = compute.get('repo_upgrade_on_boot') + additional_slave_security_groups = compute.get('additional_slave_security_groups') + ec2_key_name = compute.get('ec2_key_name') + applications = compute.get('applications') + + # params not able to be Updated + if not is_update: + if ebs_root_volume_size is not None: + emr_compute.ebs_root_volume_size = ebs_root_volume_size + + if 
availability_zones is not None: + emr_compute.availability_zones = availability_zones + + if bootstrap_actions is not None: + expand_bootstrap_actions(emr_compute=emr_compute, bootstrap_actions=bootstrap_actions) + + if steps is not None: + expand_steps(emr_compute=emr_compute, steps=steps) + + if configurations is not None: + expand_configurations(emr_compute=emr_compute, configurations=configurations) + + if emr_managed_master_security_group is not None: + emr_compute.emr_managed_master_security_group = emr_managed_master_security_group + + if emr_managed_slave_security_group is not None: + emr_compute.emr_managed_slave_security_group = emr_managed_slave_security_group + + if additional_master_security_groups is not None: + emr_compute.additional_master_security_groups = additional_master_security_groups + + if service_access_security_group is not None: + emr_compute.service_access_security_group = service_access_security_group + + if custom_ami_id is not None: + emr_compute.custom_ami_id = custom_ami_id + + if repo_upgrade_on_boot is not None: + emr_compute.repo_upgrade_on_boot = repo_upgrade_on_boot + + if additional_slave_security_groups is not None: + emr_compute.additional_slave_security_groups = additional_slave_security_groups + + if ec2_key_name is not None: + emr_compute.ec2_key_name = ec2_key_name + + if applications is not None: + expand_applications(emr_compute=emr_compute, applications=applications) + + # instance_groups is able to be Updated + if instance_groups is not None: + expand_instance_groups(emr_compute=emr_compute, instance_groups=instance_groups, is_update=is_update, do_not_update=do_not_update) + + emr.compute = emr_compute + + +def expand_bootstrap_actions(emr_compute, bootstrap_actions): + emr_bootstrap_actions = spotinst.spotinst_emr.BootstrapActions() + file = bootstrap_actions.get('file') + + if file is not None: + expand_file(schema=emr_bootstrap_actions, file=file) + + emr_compute.bootstrap_actions = emr_bootstrap_actions + + +def 
expand_steps(emr_compute, steps): + emr_steps = spotinst.spotinst_emr.Steps() + file = steps.get('file') + + if file is not None: + expand_file(schema=emr_steps, file=file) + + emr_compute.steps = emr_steps + + +# region Instance Groups +def expand_instance_groups(emr_compute, instance_groups, is_update, do_not_update): + emr_instance_groups = spotinst.spotinst_emr.InstanceGroups() + + master_group = instance_groups.get('master_group') + core_group = instance_groups.get('core_group') + task_group = instance_groups.get('task_group') + + # in create + if not is_update: + if master_group is not None: + expand_master_group(emr_instance_groups=emr_instance_groups, master_group=master_group) + if core_group is not None: + expand_core_group(emr_instance_groups=emr_instance_groups, core_group=core_group, is_update=is_update) + if task_group is not None: + expand_task_group(emr_instance_groups=emr_instance_groups, task_group=task_group, is_update=is_update) + + # in update + else: + if core_group is not None and 'core_group' not in do_not_update: + expand_core_group(emr_instance_groups=emr_instance_groups, core_group=core_group, is_update=is_update) + if task_group is not None and 'task_group' not in do_not_update: + expand_task_group(emr_instance_groups=emr_instance_groups, task_group=task_group, is_update=is_update) + + emr_compute.instance_groups = emr_instance_groups + + +def expand_master_group(emr_instance_groups, master_group): + emr_master_groups = spotinst.spotinst_emr.MasterGroup() + + instance_types = master_group.get('instance_types') + target = master_group.get('target') + life_cycle = master_group.get('life_cycle') + configurations = master_group.get('configurations') + + if instance_types is not None: + emr_master_groups.instance_types = instance_types + if target is not None: + emr_master_groups.target = target + if life_cycle is not None: + emr_master_groups.life_cycle = life_cycle + if configurations is not None: + 
expand_configurations(schema=emr_master_groups, configurations=configurations) + + emr_instance_groups.master_group = emr_master_groups + + +def expand_core_group(emr_instance_groups, core_group, is_update): + emr_core_group = spotinst.spotinst_emr.CoreGroup() + + instance_types = core_group.get('instance_types') + target = core_group.get('target') + capacity = core_group.get('capacity') + life_cycle = core_group.get('life_cycle') + ebs_configuration = core_group.get('ebs_configuration') + configurations = core_group.get('configurations') + + # Not able to Update + if not is_update: + if instance_types is not None: + emr_core_group.instance_types = instance_types + if target is not None: + emr_core_group.target = target + if life_cycle is not None: + emr_core_group.life_cycle = life_cycle + if ebs_configuration is not None: + expand_ebs_configuration(schema=emr_core_group, ebs_configuration=ebs_configuration) + if configurations is not None: + expand_configurations(schema=emr_core_group, configurations=configurations) + + if capacity is not None: + expand_capacity(schema=emr_core_group, capacity=capacity) + + emr_instance_groups.core_group = emr_core_group + + +def expand_task_group(emr_instance_groups, task_group, is_update): + emr_task_group = spotinst.spotinst_emr.TaskGroup() + + instance_types = task_group.get('instance_types') + capacity = task_group.get('capacity') + life_cycle = task_group.get('life_cycle') + ebs_configuration = task_group.get('ebs_configuration') + configurations = task_group.get('configurations') + + # Not able to Update + if not is_update: + if instance_types is not None: + emr_task_group.instance_types = instance_types + if life_cycle is not None: + emr_task_group.life_cycle = life_cycle + if ebs_configuration is not None: + expand_ebs_configuration(schema=emr_task_group, ebs_configuration=ebs_configuration) + if configurations is not None: + expand_configurations(schema=emr_task_group, configurations=configurations) + + if capacity is 
not None: + expand_capacity(schema=emr_task_group, capacity=capacity) + + emr_instance_groups.task_group = emr_task_group + + +def expand_ebs_configuration(schema, ebs_configuration): + emr_ebs_configuration = spotinst.spotinst_emr.EbsConfiguration() + + ebs_block_device_configs = ebs_configuration.get('ebs_block_device_configs') + ebs_optimized = ebs_configuration.get('ebs_optimized') + + if ebs_block_device_configs is not None: + emr_block_configs_list = [] + + for single_ebs_block_config in ebs_block_device_configs: + emr_single_ebs_block_config = spotinst.spotinst_emrSingleEbsConfig() + + volume_specification = single_ebs_block_config.get('volume_specification') + volumes_per_instance = single_ebs_block_config.get('volumes_per_instance') + + if volume_specification is not None: + emr_single_ebs_block_config.volume_specification = volume_specification + if volumes_per_instance is not None: + emr_single_ebs_block_config.volumes_per_instance = volumes_per_instance + + emr_block_configs_list.append(emr_single_ebs_block_config) + + emr_ebs_configuration.ebs_block_device_configs = emr_block_configs_list + + if ebs_optimized is not None: + emr_ebs_configuration.ebs_optimized = ebs_optimized + + schema.ebs_configuration = emr_ebs_configuration + + +def expand_capacity(schema, capacity): + emr_capacity = spotinst.spotinst_emr.Capacity() + + target = capacity.get('target') + maximum = capacity.get('maximum') + minimum = capacity.get('minimum') + + if target is not None: + emr_capacity.target = target + if maximum is not None: + emr_capacity.maximum = maximum + if minimum is not None: + emr_capacity.minimum = minimum + + schema.capacity = emr_capacity +# endregion + + +def expand_configurations(schema, configurations): + emr_configurations = spotinst.spotinst_emr.Configurations() + file = schema.get('file') + + if file is not None: + expand_file(schema=emr_configurations, file=file) + + emr_configurations.configurations = emr_configurations + + +def 
expand_applications(emr_compute, applications): + application_list = [] + + for single_application in applications: + emr_application = spotinst.spotinst_emr.Application() + + name = single_application.get('name') + args = single_application.get('args') + version = single_application.get('version') + + if name is not None: + emr_application.name = name + if args is not None: + emr_application.args = args + if version is not None: + emr_application.version = version + + application_list.append(emr_application) + + emr_compute.applications = application_list + + +def expand_file(schema, file): + emr_file = spotinst.spotinst_emr.File() + + bucket = file.get('bucket') + key = file.get('key') + + if bucket is not None: + emr_file.bucket = bucket + if key is not None: + emr_file.key = key + + schema.file = emr_file +# endregion + + +# region Cluster +def expand_cluster(emr, cluster, is_update, do_not_update): + emr_cluster = spotinst.spotinst_emr.Cluster() + + visible_to_all_users = cluster.get('visible_to_all_users') + termination_protected = cluster.get('termination_protected') + keep_job_flow_alive_when_no_steps = cluster.get('keep_job_flow_alive_when_no_steps') + log_uri = cluster.get('log_uri') + additional_info = cluster.get('additional_info') + job_flow_role = cluster.get('job_flow_role') + security_configuration = cluster.get('security_configuration') + + # in create + if not is_update: + if visible_to_all_users is not None: + emr_cluster.visible_to_all_users = visible_to_all_users + + if keep_job_flow_alive_when_no_steps is not None: + emr_cluster.keep_job_flow_alive_when_no_steps = keep_job_flow_alive_when_no_steps + + if log_uri is not None: + emr_cluster.log_uri = log_uri + + if additional_info is not None: + emr_cluster.additional_info = additional_info + + if job_flow_role is not None: + emr_cluster.job_flow_role = job_flow_role + + if security_configuration is not None: + emr_cluster.security_configuration = security_configuration + + if 
# region scheduling
def expand_scheduling(emr, scheduling):
    """Build a Scheduling object from the `scheduling` dict and attach it to `emr`."""
    emr_scheduling = spotinst.spotinst_emr.Scheduling()
    # BUG FIX: the task list was read from scheduling['scheduling'], which
    # appears to be a typo — the natural key is 'tasks' (matching the Task
    # model below).  TODO confirm against docs/argument_reference_emr.yml.
    # The old key is kept as a fallback so existing playbooks keep working.
    tasks = scheduling.get('tasks')
    if tasks is None:
        tasks = scheduling.get('scheduling')

    if tasks is not None:
        expand_tasks(emr_scheduling=emr_scheduling, tasks=tasks)

    emr.scheduling = emr_scheduling


def expand_tasks(emr_scheduling, tasks):
    """Translate the list of task dicts into SDK Task objects on `emr_scheduling`."""
    task_list = []

    for single_task in tasks:
        task = spotinst.spotinst_emr.Task()

        is_enabled = single_task.get('is_enabled')
        instance_group_type = single_task.get('instance_group_type')
        task_type = single_task.get('task_type')
        cron_expression = single_task.get('cron_expression')
        target_capacity = single_task.get('target_capacity')
        min_capacity = single_task.get('min_capacity')
        max_capacity = single_task.get('max_capacity')

        if is_enabled is not None:
            task.is_enabled = is_enabled
        if instance_group_type is not None:
            task.instance_group_type = instance_group_type
        if task_type is not None:
            task.task_type = task_type
        if cron_expression is not None:
            task.cron_expression = cron_expression
        if target_capacity is not None:
            task.target_capacity = target_capacity
        if min_capacity is not None:
            task.min_capacity = min_capacity
        if max_capacity is not None:
            task.max_capacity = max_capacity

        task_list.append(task)

    emr_scheduling.tasks = task_list
# endregion


# region Scaling
def expand_scaling(emr, scaling):
    """Build a Scaling object with up/down metric lists and attach it to `emr`."""
    emr_scaling = spotinst.spotinst_emr.Scaling()

    up = scaling.get('up')
    down = scaling.get('down')

    if up is not None:
        expand_metrics(emr_scaling=emr_scaling, metrics=up, direction="up")
    if down is not None:
        expand_metrics(emr_scaling=emr_scaling, metrics=down, direction="down")

    emr.scaling = emr_scaling
def expand_metrics(emr_scaling, metrics, direction):
    """Translate a list of metric dicts into SDK Metric objects.

    The resulting list is attached to `emr_scaling.up` or
    `emr_scaling.down` depending on `direction`.
    """
    metric_list = []

    for single_metric in metrics:
        emr_metric = spotinst.spotinst_emr.Metric()

        # BUG FIX: the original only *read* the attributes (e.g. a bare
        # `emr_metric.metric_name` statement) and never assigned them, so
        # every metric was sent to the API empty.  Copy each provided field.
        simple_fields = (
            'metric_name', 'statistic', 'unit', 'threshold', 'adjustment',
            'namespace', 'period', 'evaluation_periods', 'cooldown', 'operator',
        )
        for field in simple_fields:
            value = single_metric.get(field)
            if value is not None:
                setattr(emr_metric, field, value)

        action = single_metric.get('action')
        if action is not None:
            expand_action(emr_metric=emr_metric, action=action)

        dimensions = single_metric.get('dimensions')
        if dimensions is not None:
            expand_dimensions(emr_metric=emr_metric, dimensions=dimensions)

        metric_list.append(emr_metric)

    if direction == "up":
        emr_scaling.up = metric_list

    if direction == "down":
        emr_scaling.down = metric_list


def expand_action(emr_metric, action):
    """Build an Action (scaling adjustment) from `action` and attach it to `emr_metric`."""
    emr_action = spotinst.spotinst_emr.Action()

    type_val = action.get('type')
    adjustment = action.get('adjustment')
    min_target_capacity = action.get('min_target_capacity')
    target = action.get('target')
    minimum = action.get('minimum')
    maximum = action.get('maximum')

    if type_val is not None:
        emr_action.type = type_val
    if adjustment is not None:
        emr_action.adjustment = adjustment
    if min_target_capacity is not None:
        emr_action.min_target_capacity = min_target_capacity
    if target is not None:
        emr_action.target = target
    if minimum is not None:
        emr_action.minimum = minimum
    if maximum is not None:
        emr_action.maximum = maximum

    emr_metric.action = emr_action
def expand_dimensions(emr_metric, dimensions):
    """Translate the list of dimension dicts into SDK Dimension objects on `emr_metric`."""
    emr_dimensions = []

    for dim_dict in dimensions:
        dim_model = spotinst.spotinst_emr.Dimension()
        dim_name = dim_dict.get('name')

        if dim_name is not None:
            dim_model.name = dim_name

        emr_dimensions.append(dim_model)

    emr_metric.dimensions = emr_dimensions
# endregion
# endregion


# region Util Functions
def handle_emr(client, module):
    """Dispatch create/update/delete for the EMR cluster and return (id, message, changed)."""
    request_type, emr_id = get_request_type_and_id(client=client, module=module)

    group_id = None
    message = None
    has_changed = False

    if request_type == "create":
        group_id, message, has_changed = handle_create(client=client, module=module)
    elif request_type == "update":
        group_id, message, has_changed = handle_update(client=client, module=module, emr_id=emr_id)
    elif request_type == "delete":
        group_id, message, has_changed = handle_delete(client=client, module=module, emr_id=emr_id)
    else:
        # `request_type` is None when state=absent and the cluster is unknown.
        module.fail_json(msg="Action Not Allowed")

    return group_id, message, has_changed


def get_request_type_and_id(client, module):
    """Decide whether to create, update or delete, and resolve the cluster id.

    With uniqueness_by=id the id comes straight from the params; otherwise
    the account's clusters are listed and matched by name.
    """
    request_type = None
    emr_id = None
    should_create = False

    name = module.params.get('name')
    state = module.params.get('state')
    uniqueness_by = module.params.get('uniqueness_by')
    external_emr_id = module.params.get('id')

    if uniqueness_by == 'id':
        if external_emr_id is None:
            should_create = True
        else:
            emr_id = external_emr_id
    else:
        clusters = client.get_all_emr()
        should_create, emr_id = find_clusters_with_same_name(clusters=clusters, name=name)

    if should_create:
        if state == 'present':
            request_type = "create"
    else:
        if state == 'present':
            request_type = "update"
        elif state == 'absent':
            request_type = "delete"

    return request_type, emr_id
def find_clusters_with_same_name(clusters, name):
    """Return (should_create, cluster_id): (False, id) for the first name match, else (True, None)."""
    for cluster in clusters:
        if cluster['name'] == name:
            return False, cluster['id']

    return True, None


def get_client(module):
    """Build a SpotinstClient from module params, falling back to the credentials file.

    Credentials-file lines are simple `key = value` pairs.  A missing or
    unreadable file is ignored (deliberately best-effort) and explicit
    module params win over file values.
    """
    creds_file_loaded_vars = dict()

    credentials_path = module.params.get('credentials_path')

    if credentials_path is not None:
        try:
            with open(credentials_path, "r") as creds:
                for line in creds:
                    eq_index = line.find('=')
                    var_name = line[:eq_index].strip()
                    string_value = line[eq_index + 1:].strip()
                    creds_file_loaded_vars[var_name] = string_value
        except IOError:
            # Best effort: token/account may still come from params or env.
            pass

    token = module.params.get('token')
    if not token:
        token = creds_file_loaded_vars.get("token")

    account = module.params.get('account_id')
    if not account:
        account = creds_file_loaded_vars.get("account")

    # FIX: previously a throwaway SpotinstClient was always constructed and
    # then replaced when an account id was present; construct it once.
    if account is not None:
        return spotinst.SpotinstClient(auth_token=token, account_id=account, print_output=False)

    return spotinst.SpotinstClient(auth_token=token, print_output=False)
# endregion


# region Request Functions
def handle_create(client, module):
    """Create the EMR cluster described by the module params."""
    cluster_request = expand_emr_request(module=module, is_update=False)
    emr = client.create_emr(emr=cluster_request)

    emr_id = emr['id']
    message = 'Created EMR Cluster Successfully.'
    has_changed = True

    return emr_id, message, has_changed


def handle_update(client, module, emr_id):
    """Apply the updatable module params to the existing cluster `emr_id`."""
    cluster_request = expand_emr_request(module=module, is_update=True)
    client.update_emr(emr_id=emr_id, emr=cluster_request)

    message = 'Updated EMR Cluster successfully.'
    has_changed = True

    return emr_id, message, has_changed


def handle_delete(client, module, emr_id):
    """Delete the existing cluster `emr_id`."""
    client.delete_emr(emr_id=emr_id)

    message = 'Deleted EMR Cluster successfully.'
    has_changed = True

    return emr_id, message, has_changed
# endregion
+ has_changed = True + + return emr_id, message, has_changed +# endregion + + +def main(): + fields = dict( + account_id=dict(type='str', fallback=(env_fallback, ['SPOTINST_ACCOUNT_ID', 'ACCOUNT'])), + token=dict(type='str', fallback=(env_fallback, ['SPOTINST_TOKEN'])), + state=dict(default='present', choices=['present', 'absent']), + id=dict(type='str'), + uniqueness_by=dict(default='name', choices=['name', 'id']), + credentials_path=dict(type='path', default="~/.spotinst/credentials"), + + name=dict(type='str'), + description=dict(type='str'), + region=dict(type='str'), + strategy=dict(type='dict'), + compute=dict(type='dict'), + cluster=dict(type='dict'), + scheduling=dict(type='dict'), + scaling=dict(type='dict')) + + module = AnsibleModule(argument_spec=fields) + + if not HAS_SPOTINST_SDK: + module.fail_json(msg="the Spotinst SDK library is required. (pip install spotinst_sdk)") + + client = get_client(module=module) + + group_id, message, has_changed = handle_emr(client=client, module=module) + + module.exit_json(changed=has_changed, group_id=group_id, message=message) + + +if __name__ == '__main__': + main() diff --git a/spotinst/spotinst_ocean_cloud.py b/spotinst/spotinst_ocean_cloud.py new file mode 100644 index 0000000..c800cac --- /dev/null +++ b/spotinst/spotinst_ocean_cloud.py @@ -0,0 +1,614 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} +DOCUMENTATION = """ +--- +module: spotinst_ocean_cloud +version_added: 2.8 +short_description: Create, update or delete Spotinst Ocean +author: Spotinst (@jeffnoehren) +description: + - Can create, update, or delete Spotinst Ocean + You will have to have a credentials file in this location - /.spotinst/credentials + The credentials file must 
contain a row that looks like this + token = + Full documentation available at U(https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-) +requirements: + - python >= 2.7 + - spotinst_sdk >= 1.0.44 +options: + + id: + type: str + description: + - Optional parameter for updating or deleting a cluster based on id. Must have uniqueness_by set to "id" + + credentials_path: + type: str + default: "/root/.spotinst/credentials" + description: + - Optional parameter that allows you to set a non-default credentials path. + + account_id: + type: str + description: + - Optional parameter that allows you to set an account-id inside the module configuration. By default this is retrieved from the credentials path + + token: + type: str + description: + - Optional parameter that allows you to set a token inside the module configuration. By default this is retrieved from the credentials path + + state: + type: str + choices: + - present + - absent + default: present + description: + - Create, update, or delete + + uniqueness_by: + type: str + choices: + - id + - name + default: name + description: + - If your group names are not unique, you may use this feature to update or delete a specific group. + Whenever this property is set, you must set a group_id in order to update or delete a group, otherwise a group will be created.
+ + name: + type: str + description: + - Name for Ocean cluster + required: true + + controller_cluster_id: + type: str + description: + - This ID must be unique for each Ocean cluster per account + required: true + + region: + type: str + description: + - Region to deploy Ocean cluster instance Groups + required: true + + auto_scaler: + type: dict + description: + - Schema containing info on how auto scaler will function + required: true + + capacity: + type: dict + description: + - Schema containing target, min, and max + required: true + + strategy: + type: dict + description: + - Schema containing how to run the cluster + required: true + + compute: + type: dict + description: + - Schema containing info on the type of compute resources to use + required: true +""" +EXAMPLES = """ +#In this basic example, we create an ocean cluster + +- hosts: localhost + tasks: + - name: create ocean + spotinst_ocean_cloud: + account_id: YOUR_ACCOUNT_ID + token: YOUR_API_TOKEN + state: present + name: ansible_test_ocean + region: us-west-2 + controller_cluster_id: ocean.k8s + auto_scaler: + is_enabled: True + cooldown: 180 + resource_limits: + max_memory_gib: 1500 + max_vCpu: 750 + down: + evaluation_periods: 3 + headroom: + cpu_per_unit: 2000 + memory_per_unit: 0 + num_of_units: 4 + is_auto_config: True + capacity: + minimum: 0 + maximum: 0 + target: 0 + strategy: + utilize_reserved_instances: False + fallback_to_od: True + spot_percentage: 100 + compute: + instance_types: + whitelist: + - c4.8xlarge + subnet_ids: + - sg-123456 + launch_specification: + security_group_ids: + - sg-123456 + image_id: ami-123456 + key_pair: Noam-key + tags: + - tag_key: tags + tag_value: test + register: result + - debug: var=result +""" +RETURN = """ +--- +result: + type: str + sample: o-d861f48d + returned: success + description: Created Ocean Cluster successfully +""" +HAS_SPOTINST_SDK = False +__metaclass__ = type + +import os +import time +from ansible.module_utils.basic import AnsibleModule 
from ansible.module_utils.basic import env_fallback

try:
    import spotinst_sdk as spotinst
    from spotinst_sdk import SpotinstClientException

    HAS_SPOTINST_SDK = True

except ImportError:
    pass


# region Request Builder Functions
def expand_ocean_request(module, is_update):
    """Build a spotinst_ocean.Ocean model from the module params.

    On update, fields listed in `do_not_update` are skipped; `region`
    is create-only.  Returns the populated Ocean model.
    """
    do_not_update = module.params.get('do_not_update') or []

    ocean = spotinst.spotinst_ocean.Ocean()

    def should_set(field_name):
        # FIX: the original duplicated this is_update/do_not_update branching
        # six times; a field is applied on create always, and on update only
        # when the user did not exclude it via `do_not_update`.
        return not is_update or field_name not in do_not_update

    name = module.params.get('name')
    if name is not None and should_set('name'):
        ocean.name = name

    controller_cluster_id = module.params.get('controller_cluster_id')
    if controller_cluster_id is not None and should_set('controller_cluster_id'):
        ocean.controller_cluster_id = controller_cluster_id

    # Region can only be set when the cluster is created.
    region = module.params.get('region')
    if region is not None and not is_update:
        ocean.region = region

    auto_scaler = module.params.get('auto_scaler')
    if auto_scaler is not None and should_set('auto_scaler'):
        expand_auto_scaler(ocean=ocean, auto_scaler=auto_scaler)

    capacity = module.params.get('capacity')
    if capacity is not None and should_set('capacity'):
        expand_capacity(ocean=ocean, capacity=capacity)

    strategy = module.params.get('strategy')
    if strategy is not None and should_set('strategy'):
        expand_strategy(ocean=ocean, strategy=strategy)

    compute = module.params.get('compute')
    if compute is not None and should_set('compute'):
        expand_compute(ocean=ocean, compute=compute)

    return ocean
# region Auto Scaler
def expand_auto_scaler(ocean, auto_scaler):
    """Translate the auto_scaler dict into an SDK AutoScaler object on `ocean`."""
    scaler_model = spotinst.spotinst_ocean.AutoScaler()

    is_enabled = auto_scaler.get('is_enabled')
    if is_enabled is not None:
        scaler_model.is_enabled = is_enabled

    cooldown = auto_scaler.get('cooldown')
    if cooldown is not None:
        scaler_model.cooldown = cooldown

    resource_limits = auto_scaler.get('resource_limits')
    if resource_limits is not None:
        expand_resource_limits(ocean_auto_scaler=scaler_model, resource_limits=resource_limits)

    down = auto_scaler.get('down')
    if down is not None:
        expand_down(ocean_auto_scaler=scaler_model, down=down)

    headroom = auto_scaler.get('headroom')
    if headroom is not None:
        expand_headroom(ocean_auto_scaler=scaler_model, headroom=headroom)

    is_auto_config = auto_scaler.get('is_auto_config')
    if is_auto_config is not None:
        scaler_model.is_auto_config = is_auto_config

    ocean.auto_scaler = scaler_model


def expand_resource_limits(ocean_auto_scaler, resource_limits):
    """Copy max_memory_gib / max_vCpu limits onto a ResourceLimits object."""
    limits_model = spotinst.spotinst_ocean.ResourceLimits()

    memory_cap = resource_limits.get('max_memory_gib')
    if memory_cap is not None:
        limits_model.max_memory_gib = memory_cap

    cpu_cap = resource_limits.get('max_vCpu')
    if cpu_cap is not None:
        limits_model.max_vCpu = cpu_cap

    ocean_auto_scaler.resource_limits = limits_model


def expand_down(ocean_auto_scaler, down):
    """Copy the scale-down evaluation_periods onto a Down object."""
    down_model = spotinst.spotinst_ocean.Down()

    periods = down.get('evaluation_periods')
    if periods is not None:
        down_model.evaluation_periods = periods

    ocean_auto_scaler.down = down_model


def expand_headroom(ocean_auto_scaler, headroom):
    """Copy spare-capacity (headroom) settings onto a Headroom object."""
    headroom_model = spotinst.spotinst_ocean.Headroom()

    for field in ('cpu_per_unit', 'memory_per_unit', 'num_of_units'):
        value = headroom.get(field)
        if value is not None:
            setattr(headroom_model, field, value)

    ocean_auto_scaler.headroom = headroom_model
# endregion
# region Capacity
def expand_capacity(ocean, capacity):
    """Copy minimum/maximum/target instance counts onto a Capacity object."""
    capacity_model = spotinst.spotinst_ocean.Capacity()

    for field in ('minimum', 'maximum', 'target'):
        value = capacity.get(field)
        if value is not None:
            setattr(capacity_model, field, value)

    ocean.capacity = capacity_model
# endregion


# region Strategy
def expand_strategy(ocean, strategy):
    """Copy spot/on-demand strategy flags onto a Strategy object."""
    strategy_model = spotinst.spotinst_ocean.Strategy()

    for field in ('utilize_reserved_instances', 'fallback_to_od', 'spot_percentage'):
        value = strategy.get(field)
        if value is not None:
            setattr(strategy_model, field, value)

    ocean.strategy = strategy_model
# endregion


# region Compute
def expand_compute(ocean, compute):
    """Build the Compute section (instance types, subnets, launch spec) on `ocean`."""
    compute_model = spotinst.spotinst_ocean.Compute()

    instance_types = compute.get('instance_types')
    if instance_types is not None:
        expand_instance_types(ocean_compute=compute_model, instance_types=instance_types)

    subnet_ids = compute.get('subnet_ids')
    if subnet_ids is not None:
        compute_model.subnet_ids = subnet_ids

    launch_specification = compute.get('launch_specification')
    if launch_specification is not None:
        expand_launch_specification(ocean_compute=compute_model, launch_specification=launch_specification)

    ocean.compute = compute_model
def expand_instance_types(ocean_compute, instance_types):
    """Copy instance-type whitelist/blacklist onto an InstanceTypes object."""
    types_model = spotinst.spotinst_ocean.InstanceTypes()

    allowed = instance_types.get('whitelist')
    if allowed is not None:
        types_model.whitelist = allowed

    denied = instance_types.get('blacklist')
    if denied is not None:
        types_model.blacklist = denied

    ocean_compute.instance_types = types_model


def expand_launch_specification(ocean_compute, launch_specification):
    """Build the LaunchSpecifications section on `ocean_compute`."""
    launch_model = spotinst.spotinst_ocean.LaunchSpecifications()

    # Scalar fields are copied verbatim when provided.
    for field in ('security_group_ids', 'image_id', 'key_pair', 'user_data'):
        value = launch_specification.get(field)
        if value is not None:
            setattr(launch_model, field, value)

    iam_instance_profile = launch_specification.get('iam_instance_profile')
    if iam_instance_profile is not None:
        expand_iam_instance_profile(ocean_launch_specs=launch_model, iam_instance_profile=iam_instance_profile)

    tags = launch_specification.get('tags')
    if tags is not None:
        expand_tags(ocean_launch_specs=launch_model, tags=tags)

    ocean_compute.launch_specification = launch_model


def expand_iam_instance_profile(ocean_launch_specs, iam_instance_profile):
    """Copy the IAM instance profile (arn and/or name) onto the launch spec."""
    profile_model = spotinst.spotinst_ocean.IamInstanceProfile()

    arn = iam_instance_profile.get('arn')
    if arn is not None:
        profile_model.arn = arn

    profile_name = iam_instance_profile.get('name')
    if profile_name is not None:
        profile_model.name = profile_name

    ocean_launch_specs.iam_instance_profile = profile_model


def expand_tags(ocean_launch_specs, tags):
    """Translate tag dicts (tag_key/tag_value) into SDK Tag objects on the launch spec."""
    tag_models = []

    for tag_dict in tags:
        tag_model = spotinst.spotinst_ocean.Tag()

        key = tag_dict.get('tag_key')
        if key is not None:
            tag_model.tag_key = key

        value = tag_dict.get('tag_value')
        if value is not None:
            tag_model.tag_value = value

        tag_models.append(tag_model)

    ocean_launch_specs.tags = tag_models
# endregion
# endregion
# region Util Functions
def handle_ocean(client, module):
    """Dispatch create/update/delete for the Ocean cluster and return (id, message, changed)."""
    request_type, ocean_id = get_request_type_and_id(client=client, module=module)

    group_id = None
    message = None
    has_changed = False

    if request_type == "create":
        group_id, message, has_changed = handle_create(client=client, module=module)
    elif request_type == "update":
        group_id, message, has_changed = handle_update(client=client, module=module, ocean_id=ocean_id)
    elif request_type == "delete":
        group_id, message, has_changed = handle_delete(client=client, module=module, ocean_id=ocean_id)
    else:
        module.fail_json(msg="Action Not Allowed")

    return group_id, message, has_changed


def get_request_type_and_id(client, module):
    """Decide whether to create, update or delete, and resolve the cluster id."""
    request_type = None
    # BUG FIX: was initialised to the *string* "None" (truthy, and not the
    # sentinel the EMR sibling module uses); use the real None.
    ocean_id = None
    should_create = False

    name = module.params.get('name')
    state = module.params.get('state')
    uniqueness_by = module.params.get('uniqueness_by')
    external_ocean_id = module.params.get('id')

    if uniqueness_by == 'id':
        if external_ocean_id is None:
            should_create = True
        else:
            ocean_id = external_ocean_id
    else:
        clusters = client.get_all_ocean_cluster()
        should_create, ocean_id = find_clusters_with_same_name(clusters=clusters, name=name)

    if should_create is True:
        if state == 'present':
            request_type = "create"

        elif state == 'absent':
            request_type = None

    else:
        if state == 'present':
            request_type = "update"

        elif state == 'absent':
            request_type = "delete"

    return request_type, ocean_id


def find_clusters_with_same_name(clusters, name):
    """Return (should_create, cluster_id): (False, id) for the first name match, else (True, None)."""
    for cluster in clusters:
        if cluster['name'] == name:
            return False, cluster['id']

    return True, None
def get_client(module):
    """Build a SpotinstClient from module params, falling back to the credentials file.

    Credentials-file lines are simple `key = value` pairs.  A missing or
    unreadable file is ignored (deliberately best-effort) and explicit
    module params win over file values.
    """
    creds_file_loaded_vars = dict()

    credentials_path = module.params.get('credentials_path')

    if credentials_path is not None:
        try:
            with open(credentials_path, "r") as creds:
                for line in creds:
                    eq_index = line.find('=')
                    var_name = line[:eq_index].strip()
                    string_value = line[eq_index + 1:].strip()
                    creds_file_loaded_vars[var_name] = string_value
        except IOError:
            # Best effort: token/account may still come from params or env.
            pass

    token = module.params.get('token')
    if not token:
        token = creds_file_loaded_vars.get("token")

    account = module.params.get('account_id')
    if not account:
        account = creds_file_loaded_vars.get("account")

    # FIX: previously a throwaway SpotinstClient was always constructed and
    # then replaced when an account id was present; construct it once.
    if account is not None:
        return spotinst.SpotinstClient(auth_token=token, account_id=account, print_output=False)

    return spotinst.SpotinstClient(auth_token=token, print_output=False)
# endregion


# region Request Functions
def handle_create(client, module):
    """Create the Ocean cluster described by the module params."""
    cluster_request = expand_ocean_request(module=module, is_update=False)
    ocean = client.create_ocean_cluster(ocean=cluster_request)

    ocean_id = ocean['id']
    message = 'Created Ocean Cluster successfully'
    has_changed = True

    return ocean_id, message, has_changed


def handle_update(client, module, ocean_id):
    """Apply the updatable module params to the existing cluster `ocean_id`."""
    cluster_request = expand_ocean_request(module=module, is_update=True)
    client.update_ocean_cluster(ocean_id=ocean_id, ocean=cluster_request)

    message = 'Updated Ocean Cluster successfully'
    has_changed = True

    return ocean_id, message, has_changed


def handle_delete(client, module, ocean_id):
    """Delete the existing cluster `ocean_id`."""
    client.delete_ocean_cluster(ocean_id=ocean_id)

    message = 'Deleted Ocean Cluster successfully'
    has_changed = True

    return ocean_id, message, has_changed
# endregion


def main():
    """Ansible entry point: declare the argument spec and run the requested action."""
    fields = dict(
        account_id=dict(type='str', fallback=(env_fallback, ['SPOTINST_ACCOUNT_ID', 'ACCOUNT'])),
        token=dict(type='str', fallback=(env_fallback, ['SPOTINST_TOKEN'])),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        id=dict(type='str'),
        uniqueness_by=dict(type='str', default='name', choices=['name', 'id']),
        credentials_path=dict(type='path', default="~/.spotinst/credentials"),

        name=dict(type='str'),
        controller_cluster_id=dict(type='str'),
        region=dict(type='str'),
        auto_scaler=dict(type='dict'),
        capacity=dict(type='dict'),
        strategy=dict(type='dict'),
        compute=dict(type='dict'))

    module = AnsibleModule(argument_spec=fields)

    if not HAS_SPOTINST_SDK:
        module.fail_json(msg="the Spotinst SDK library is required. (pip install spotinst_sdk)")

    client = get_client(module=module)

    group_id, message, has_changed = handle_ocean(client=client, module=module)

    module.exit_json(changed=has_changed, group_id=group_id, message=message, instances=[])


if __name__ == '__main__':
    main()
import unittest
import sys
from mock import MagicMock
sys.modules['spotinst_sdk'] = MagicMock()

from ansible.modules.cloud.spotinst.spotinst_aws_elastigroup import expand_elastigroup


class MockModule:
    """Minimal stand-in for AnsibleModule exposing only `params`."""

    def __init__(self, input_dict):
        self.params = input_dict


class TestSpotinstAwsElastigroup(unittest.TestCase):
    """Unit test for the spotinst_aws_elastigroup module"""

    def test_expand_elastigroup(self):
        """Format input into proper json structure"""

        input_dict = dict(
            name="test_name",
            min_size=1,
            max_size=2,
            target=3,
            product="test_product",
            image_id="test_id",
            health_check_grace_period=0,
            ebs_optimized=True,
            elastic_beanstalk=dict(
                managed_actions=dict(
                    platform_update=dict(
                        perform_at="test_perform_at",
                        time_window="test_time_window",
                        update_level="test_update_level"
                    )
                ),
                deployment_preferences=dict(
                    grace_period=0,
                    batch_size_percentage=100,
                    automatic_roll=True
                )
            )
        )

        eg = expand_elastigroup(module=MockModule(input_dict=input_dict), is_update=False)

        self.assertEqual("test_name", eg.name)

        self.assertEqual(1, eg.capacity.minimum)
        self.assertEqual(2, eg.capacity.maximum)
        self.assertEqual(3, eg.capacity.target)

        launch_spec = eg.compute.launch_specification
        self.assertEqual("test_product", eg.compute.product)
        self.assertEqual("test_id", launch_spec.image_id)
        self.assertEqual(0, launch_spec.health_check_grace_period)
        self.assertEqual(True, launch_spec.ebs_optimized)

        beanstalk = eg.third_parties_integration.elastic_beanstalk
        platform_update = beanstalk.managed_actions.platform_update
        self.assertEqual("test_perform_at", platform_update.perform_at)
        self.assertEqual("test_time_window", platform_update.time_window)
        self.assertEqual("test_update_level", platform_update.update_level)

        deployment = beanstalk.deployment_preferences
        self.assertEqual(0, deployment.grace_period)
        self.assertEqual(100, deployment.batch_size_percentage)
        self.assertEqual(True, deployment.automatic_roll)
+ time_window="test_time_window", + update_level="test_update_level" + ) + ), + deployment_preferences=dict( + grace_period=0, + batch_size_percentage=100, + automatic_roll=True + ) + ) + ) + module = MockModule(input_dict=input_dict) + actual_eg = expand_elastigroup(module=module, is_update=False) + + self.assertEqual("test_name", actual_eg.name) + + self.assertEqual(1, actual_eg.capacity.minimum) + self.assertEqual(2, actual_eg.capacity.maximum) + self.assertEqual(3, actual_eg.capacity.target) + + self.assertEqual("test_product", actual_eg.compute.product) + self.assertEqual("test_id", actual_eg.compute.launch_specification.image_id) + self.assertEqual(0, actual_eg.compute.launch_specification.health_check_grace_period) + self.assertEqual(True, actual_eg.compute.launch_specification.ebs_optimized) + + self.assertEqual( + "test_perform_at", actual_eg.third_parties_integration.elastic_beanstalk.managed_actions.platform_update.perform_at) + self.assertEqual( + "test_time_window", actual_eg.third_parties_integration.elastic_beanstalk.managed_actions.platform_update.time_window) + self.assertEqual( + "test_update_level", actual_eg.third_parties_integration.elastic_beanstalk.managed_actions.platform_update.update_level) + + self.assertEqual( + 0, actual_eg.third_parties_integration.elastic_beanstalk.deployment_preferences.grace_period) + self.assertEqual( + 100, actual_eg.third_parties_integration.elastic_beanstalk.deployment_preferences.batch_size_percentage) + self.assertEqual( + True, actual_eg.third_parties_integration.elastic_beanstalk.deployment_preferences.automatic_roll) diff --git a/test/test_spotinst_event_subscription.py b/test/test_spotinst_event_subscription.py new file mode 100644 index 0000000..74961b4 --- /dev/null +++ b/test/test_spotinst_event_subscription.py @@ -0,0 +1,35 @@ +import unittest +import sys +from mock import MagicMock +sys.modules['spotinst_sdk'] = MagicMock() + +from ansible.modules.cloud.spotinst.spotinst_event_subscription import 
import unittest
import sys
from mock import MagicMock
sys.modules['spotinst_sdk'] = MagicMock()

from ansible.modules.cloud.spotinst.spotinst_mrscaler import expand_emr_request


class MockModule:
    """Minimal stand-in for AnsibleModule exposing only `params`."""

    def __init__(self, input_dict):
        self.params = input_dict


class TestSpotinstMrScaler(unittest.TestCase):
    """Unit test for the spotinst_mrscaler module"""

    def test_expand_emr_request(self):
        """Format input into proper json structure"""

        input_dict = dict(
            name="test_name",
            strategy=dict(
                new=dict(
                    release_label="emr-5.17.0"
                )
            ),
            compute=dict(
                instance_groups=dict(
                    master_group=dict(
                        life_cycle="SPOT",
                        target=0
                    ),
                    core_group=dict(
                        target=1,
                        life_cycle="ON_DEMAND"
                    )
                )
            )
        )

        mr_scaler = expand_emr_request(module=MockModule(input_dict=input_dict), is_update=False)

        self.assertEqual("test_name", mr_scaler.name)
        self.assertEqual("emr-5.17.0", mr_scaler.strategy.new.release_label)

        groups = mr_scaler.compute.instance_groups
        self.assertEqual("SPOT", groups.master_group.life_cycle)
        self.assertEqual(0, groups.master_group.target)
        self.assertEqual("ON_DEMAND", groups.core_group.life_cycle)
        self.assertEqual(1, groups.core_group.target)
import unittest
import sys
from mock import MagicMock
sys.modules['spotinst_sdk'] = MagicMock()

from ansible.modules.cloud.spotinst.spotinst_ocean_cloud import expand_ocean_request


class MockModule:
    """Minimal stand-in for AnsibleModule exposing only `params`."""

    def __init__(self, input_dict):
        self.params = input_dict


class TestSpotinstOceanCloud(unittest.TestCase):
    """Unit test for the spotinst_ocean_cloud module"""

    def test_expand_ocean_request(self):
        """Format input into proper json structure"""

        input_dict = dict(
            name="test_name",
            controller_cluster_id="test_controller_cluster_id",
            region="test_region",
            compute=dict(
                launch_specification=dict(
                    user_data="test_user_data",
                    key_pair="test_key_pair",
                    image_id="test_image_id",
                    security_group_ids=["test_security_group_ids"]
                )
            )
        )

        ocean = expand_ocean_request(module=MockModule(input_dict=input_dict), is_update=False)

        self.assertEqual("test_name", ocean.name)
        self.assertEqual("test_controller_cluster_id", ocean.controller_cluster_id)
        self.assertEqual("test_region", ocean.region)

        launch_spec = ocean.compute.launch_specification
        self.assertEqual("test_user_data", launch_spec.user_data)
        self.assertEqual("test_key_pair", launch_spec.key_pair)
        self.assertEqual("test_image_id", launch_spec.image_id)
        self.assertEqual(["test_security_group_ids"], launch_spec.security_group_ids)