def test_create_windows_instance(self):
        ctx = self.mock_ctx('testwin')
        ctx.node.properties[constants.IMAGE_KEY
                            ][constants.PUBLISHER_KEY] = \
            'MicrosoftWindowsServer'
        ctx.node.properties[constants.IMAGE_KEY
                            ][constants.OFFER_KEY] = 'WindowsServer'
        ctx.node.properties[constants.IMAGE_KEY
                            ][constants.SKU_KEY] = '2012-R2-Datacenter'
        ctx.node.properties[constants.WINDOWS_AUTOMATIC_UPDATES_KEY] = True

        current_ctx.set(ctx=ctx)
        ctx.logger.info("BEGIN create windows VM test")

        ctx.logger.info("Creating VM...")
        instance.create(ctx=ctx)

        current_ctx.set(ctx=ctx)
        jsonVM = instance.get_json_from_azure(ctx=ctx)

        self.assertIsNotNone(jsonVM['properties']['osProfile'][
                                    'windowsConfiguration'])
        current_ctx.set(ctx=ctx)
        utils.wait_status(ctx, "instance",constants.SUCCEEDED, 600)

        ctx.logger.info("delete windows VM")
        self.assertEqual(202, instance.delete(ctx=ctx))

        ctx.logger.info("END create windows VM test")

    def test_create_too_much_datadisks(self):
        disks = [{'name': 'much_disks_1',
                  'size': 100,
                  'deletable': False,
                  'caching': 'None'
                },{'name': 'much_disks_2',
                   'size': 200,
                   'deletable': False,
                   'caching': 'ReadWrite'
                },{'name': 'much_disks_3',
                   'size': 200,
                   'deletable': False,
                   'caching': 'ReadOnly'
                }]

        test_name = 'test-create-too-much-datadisks'
        ctx = self.mock_ctx(test_name, disks)
        current_ctx.set(ctx=ctx)
        ctx.logger.info("BEGIN create VM test: {}".format(test_name))

        instance.create(ctx=ctx)
        
        current_ctx.set(ctx=ctx)
        utils.wait_status(ctx, 'instance', timeout=900)

        current_ctx.set(ctx=ctx)
        datadisks.create(ctx=ctx)

        current_ctx.set(ctx=ctx)
        json_VM = instance.get_json_from_azure(ctx=ctx)

        self.assertIsNotNone(json_VM['properties'][
                                        'storageProfile']['dataDisks'])

        disks_vm = json_VM['properties']['storageProfile']['dataDisks']

        ctx.logger.debug(disks_vm)

        self.assertNotEqual(len(disks), len(disks_vm))

        self.assertEqual(disks[0]['name'], disks_vm[0]['name'])
        self.assertEqual(disks[0]['size'], disks_vm[0]['diskSizeGB'])
        self.assertEqual(disks[0]['caching'], disks_vm[0]['caching'])

        self.assertEqual(disks[1]['name'], disks_vm[1]['name'])
        self.assertEqual(disks[1]['size'], disks_vm[1]['diskSizeGB'])
        self.assertEqual(disks[1]['caching'], disks_vm[1]['caching'])

        current_ctx.set(ctx=ctx)
        ctx.logger.info("BEGIN delete VM test: {}".format(test_name))
        instance.delete(ctx=ctx)

        try:
            current_ctx.set(ctx=ctx)
            utils.wait_status(ctx, 'instance', 
                              constants.DELETING, timeout=900)
        except utils.WindowsAzureError:
            pass

    def test_create_2nic_instance(self):
        ctx = self.mock_ctx('testcreate2nicinstance')
        current_ctx.set(ctx=ctx)
        ctx.logger.info("BEGIN create 2 NIC VM test: {}".format(ctx.instance.id))

        subnet_name = 'instancesubnet_test_2_' + self.__random_id
        nic_name = 'instance_nic_test_2_' + self.__random_id

        ctx.logger.info("create new subnet")
        ctx.node.properties[constants.SUBNET_KEY] = subnet_name
        ctx.node.properties[constants.SUBNET_ADDRESS_KEY] =\
            "10.0.2.0/24"
        current_ctx.set(ctx=ctx)
        subnet.create(ctx=ctx)
        current_ctx.set(ctx=ctx)
        utils.wait_status(ctx, "subnet",constants.SUCCEEDED, 600)

        ctx.logger.info("create second NIC")
        ctx.node.properties[constants.NETWORK_INTERFACE_KEY] = nic_name
        ctx.node.properties[constants.NIC_PRIMARY_KEY] = True
        ctx.node.properties[constants.AZURE_CONFIG_KEY][constants.SUBNET_KEY] = subnet_name
        for relationship in ctx.instance.relationships:
            if relationship.type == constants.NIC_CONNECTED_TO_SUBNET:
                relationship.target.instance.runtime_properties[constants.SUBNET_KEY] = subnet_name
        current_ctx.set(ctx=ctx)
        nic.create(ctx=ctx)
        current_ctx.set(ctx=ctx)
        utils.wait_status(ctx, "nic",constants.SUCCEEDED, 600)

        ctx.logger.info("create VM")
        ctx.node.properties[constants.FLAVOR_KEY] = 'Standard_A3'
        ctx.instance.relationships.append(test_mockcontext.MockRelationshipContext(node_id='test',
            runtime_properties={
                constants.NETWORK_INTERFACE_KEY: nic_name,
                constants.NIC_PRIMARY_KEY: True
            },
            type=constants.INSTANCE_CONNECTED_TO_NIC)
        )
        current_ctx.set(ctx=ctx)
        instance.create(ctx=ctx)
        current_ctx.set(ctx=ctx)
        utils.wait_status(ctx, "instance",constants.SUCCEEDED, 600)

        ctx.logger.info("verify the NIC's number of the instance")
        json = instance.get_json_from_azure()
        self.assertEqual(len(json['properties']['networkProfile']['networkInterfaces']),2)

        ctx.logger.info("delete VM")
        self.assertEqual(202, instance.delete(ctx=ctx))

        ctx.logger.info("END create VM test")

    def test_create_instance_from_vhd(self):
        '''To run this test, you need all the resources required to start a
        machine (resource group, storage account, NIC). You then have to
        upload a valid bootable VHD to the storage account. Note the VHD's
        endpoint and replace MY_URI_VHD with this value.
        Then you can run the test.
        Note that the resource group will not be deleted by the class teardown.
        '''
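        # Illustrative only: MY_URI_VHD is assumed to point at a bootable page
        # blob inside MY_STORAGE_ACCOUNT, e.g. (hypothetical value)
        # 'http://<storage_account>.blob.core.windows.net/vhds/bootable-image.vhd'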
        ctx = self.mock_ctx('testinstancevhd')
        ctx.node.properties[constants.AZURE_CONFIG_KEY
                            ][constants.RESOURCE_GROUP_KEY] = MY_RESOURCE_GROUP
        ctx.node.properties[constants.AZURE_CONFIG_KEY
                            ][constants.STORAGE_ACCOUNT_KEY] = MY_STORAGE_ACCOUNT
        ctx.node.properties[constants.NETWORK_INTERFACE_KEY] = MY_NIC
        ctx.node.properties[constants.IMAGE_KEY] = {}
        ctx.node.properties[constants.IMAGE_KEY
                            ][constants.OS_URI_KEY] = MY_URI_VHD
        ctx.node.properties[constants.IMAGE_KEY
                            ][constants.OS_TYPE_KEY] = 'Linux'

        current_ctx.set(ctx=ctx)
        ctx.logger.info("BEGIN create VM test: {}".format(ctx.instance.id))
        ctx.logger.info("create VM") 

        instance.create(ctx=ctx) 
        current_ctx.set(ctx=ctx)
        utils.wait_status(ctx, "instance",constants.SUCCEEDED, 600)

        current_ctx.set(ctx=ctx)
        jsonVM = instance.get_json_from_azure(ctx=ctx)

        self.assertEqual(
            jsonVM['properties']['storageProfile']['osDisk']['osType'],
            ctx.node.properties[constants.IMAGE_KEY][constants.OS_TYPE_KEY]
        )

        self.assertEqual(
            jsonVM['properties']['storageProfile']['osDisk']['image']['uri'],
            ctx.node.properties[constants.IMAGE_KEY][constants.OS_URI_KEY]
        )

        ctx.logger.info("delete VM")
        self.assertEqual(202, instance.delete(ctx=ctx))

        ctx.logger.info("END create VM test")

    def test_create_datadisk(self):
        disk = [{'name': 'disk_1',
                 'size': 100,
                 'deletable': False,
                 'caching': 'None'
               }]

        test_name = 'test-create-datadisk'
        ctx = self.mock_ctx(test_name, disk)
        current_ctx.set(ctx=ctx)
        ctx.logger.info("BEGIN create VM test: {}".format(test_name)) 

        instance.create(ctx=ctx)
        current_ctx.set(ctx=ctx)
        utils.wait_status(ctx, 'instance', constants.SUCCEEDED, timeout=900)

        current_ctx.set(ctx=ctx)
        datadisks.create(ctx=ctx)

        current_ctx.set(ctx=ctx)
        json_VM = instance.get_json_from_azure(ctx=ctx)

        ctx.logger.debug(json_VM)

        self.assertIsNotNone(json_VM['properties'][
                                        'storageProfile']['dataDisks'])
        
        disks_vm = json_VM['properties']['storageProfile']['dataDisks']

        ctx.logger.debug(disks_vm)

        self.assertEqual(disk[0]['name'], disks_vm[0]['name'])
        self.assertEqual(disk[0]['caching'], disks_vm[0]['caching'])
        self.assertEqual(disk[0]['size'], disks_vm[0]['diskSizeGB'])

        current_ctx.set(ctx=ctx)
        ctx.logger.info("BEGIN delete VM test: {}".format(test_name)) 
        instance.delete(ctx=ctx)

        try:
            current_ctx.set(ctx=ctx)
            utils.wait_status(ctx, 'instance', 
                              constants.DELETING, timeout=900)
        except utils.WindowsAzureError:
            pass

    def test_get_json_instance(self):
        ctx = self.mock_ctx('testgetjson')
        current_ctx.set(ctx=ctx)
        ctx.logger.info("BEGIN getjson VM test")

        ctx.logger.info("Creating VM...")
        instance.create(ctx=ctx)

        time.sleep(TIME_DELAY)

        ctx.logger.info("Getting json...")
        current_ctx.set(ctx=ctx)
        jsonVM = instance.get_json_from_azure(ctx=ctx)

        self.assertEqual(jsonVM['name'], ctx.node.properties[constants.COMPUTE_KEY])

        time.sleep(TIME_DELAY)

        ctx.logger.info("Deleting VM...")
        current_ctx.set(ctx=ctx)
        instance.delete(ctx=ctx)

    def test_add_availability_set_instance(self):
        ctx = self.mock_ctx('testaddavailabilityinstance')
        current_ctx.set(ctx=ctx)
        ctx.logger.info("BEGIN add availability set VM test: {}".format(ctx.instance.id))

        ctx.logger.info("create availability_set")
        self.assertEqual(200, availability_set.create(ctx=ctx))
        ctx.logger.debug("availability_set_id = {}".format(
            ctx.instance.runtime_properties[constants.AVAILABILITY_ID_KEY]))

        ctx.instance.relationships.append(test_mockcontext.MockRelationshipContext(node_id='test',
            runtime_properties={
                constants.AVAILABILITY_ID_KEY:\
                     ctx.instance.runtime_properties[constants.AVAILABILITY_ID_KEY]
            },
            type=constants.INSTANCE_CONTAINED_IN_AVAILABILITY_SET)
        )

        ctx.logger.info("create VM")
        instance.create(ctx=ctx)

        current_ctx.set(ctx=ctx)
        utils.wait_status(ctx, "instance",constants.SUCCEEDED, 600)

        ctx.logger.info("test instance is in availability_set")
        current_ctx.set(ctx=ctx)
        json = instance.get_json_from_azure(ctx=ctx)
        self.assertIsNotNone(json['properties']['availabilitySet'])
        self.assertEqual(str(json['properties']['availabilitySet']['id']).lower(),
            str(ctx.instance.runtime_properties[constants.AVAILABILITY_ID_KEY]).lower()
        )

        ctx.logger.info("delete VM")
        self.assertEqual(202, instance.delete(ctx=ctx))

        ctx.logger.info("delete availability_set")
        self.assertEqual(200, availability_set.delete(ctx=ctx))

        ctx.logger.info("END create VM test")

    def test_datadisk_in_storage_account(self):
        disk = [{'name': 'attach_disk',
                  'size': 100,
                  'deletable': False,
                  'caching': 'None'
                }]

        test_name = 'test-datadisk-in-storage-account'
        ctx = self.mock_ctx(test_name, disk)

        current_ctx.set(ctx=ctx)
        ctx.logger.info("CREATE storage account")
        ctx.node.properties[constants.ACCOUNT_TYPE_KEY] = "Standard_LRS"
        ctx.node.properties[constants.STORAGE_ACCOUNT_KEY] = \
            "storageaccountdisk" + self.__random_id
        storage.create(ctx=ctx)
        current_ctx.set(ctx=ctx)
        utils.wait_status(ctx, "storage",constants.SUCCEEDED, timeout=600)

        current_ctx.set(ctx=ctx)
        ctx.logger.info("BEGIN create VM test: {}".format(test_name))

        instance.create(ctx=ctx)
        
        ctx.instance.relationships.append(
            MockRelationshipContext(
                'test',
                {constants.STORAGE_ACCOUNT_KEY:
                     'storageaccountdisk' + self.__random_id},
                constants.DISK_CONTAINED_IN_STORAGE_ACCOUNT
            )
        )

        current_ctx.set(ctx=ctx)
        datadisks.create(ctx=ctx)

        current_ctx.set(ctx=ctx)
        utils.wait_status(ctx, 'instance', timeout=900)

        jsonInstance = instance.get_json_from_azure(ctx=ctx)

        self.assertIn('storageaccountdisk' + self.__random_id,
                      jsonInstance['properties'
                                   ]['storageProfile'
                                     ]['dataDisks'][0]['vhd']['uri']
                      )

        ctx.logger.info('Disks are located in {}.'.format(
            'storageaccountdisk' + self.__random_id))

        current_ctx.set(ctx=ctx)
        ctx.logger.info("BEGIN delete VM test: {}".format(test_name))
        instance.delete(ctx=ctx)

        try:
            current_ctx.set(ctx=ctx)
            utils.wait_status(ctx, 'instance', 
                              constants.DELETING, timeout=900)
        except utils.WindowsAzureError:
            pass

        current_ctx.set(ctx=ctx)
        ctx.logger.info("DELETE storage account")
        ctx.node.properties[constants.ACCOUNT_TYPE_KEY] = "Standard_LRS"
        ctx.node.properties[constants.STORAGE_ACCOUNT_KEY] = \
            "storageaccountdisk" + self.__random_id
        storage.delete(ctx=ctx)


def create(**_):
    """Create a data disk. The datadisk can be created in the same
    storage account of the VM, or in its own storage account as
    defined in the blueprint. Within the storage account, the disk
    is contained in a container, its name follows this schema:
    <vm_name>-vhds. All disks are automatically suffixed with .vhd.

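    For example (illustrative values only), a disk named 'disk_1' attached to
    a VM named 'myvm' through storage account 'mystorageacc' would be stored
    at: http://mystorageacc.blob.core.windows.net/myvm-vhds/disk_1.vhd
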
    :param ctx: The Cloudify ctx context.
    """
    utils.validate_node_property(constants.DISKS_KEY, ctx.node.properties)
    
    azure_config = utils.get_azure_config(ctx)

    subscription_id = azure_config[constants.SUBSCRIPTION_KEY]
    resource_group_name = azure_config[constants.RESOURCE_GROUP_KEY]
    vm_name = utils.get_target_property(
                                        ctx, 
                                        constants.DISK_ATTACH_TO_INSTANCE,
                                        constants.COMPUTE_KEY
                                        )

    disks = ctx.node.properties[constants.DISKS_KEY]
    api_version = constants.AZURE_API_VERSION_06
    
    try:
        storage_account = utils.get_target_property(
                                ctx, 
                                constants.DISK_CONTAINED_IN_STORAGE_ACCOUNT,
                                constants.STORAGE_ACCOUNT_KEY
                                )
        ctx.logger.info(("Use storage account {} in" +  
                        "DISK_CONTAINED_IN_STORAGE_ACCOUNT relationship").
                        format(storage_account)
                        )
    except NonRecoverableError:
        storage_account = utils.get_target_property(
                                        ctx, 
                                        constants.DISK_ATTACH_TO_INSTANCE,
                                        constants.STORAGE_ACCOUNT_KEY
                                        )
        ctx.logger.info(("Use storage account {}" + 
                        "in DISK_ATTACH_TO_INSTANCE relationship").
                        format(storage_account)
                        )

    # Place the VM name and storage account in runtime_properties.
    # They are used to retrieve disks from the storage account.
    ctx.instance.runtime_properties[constants.COMPUTE_KEY] = vm_name
    ctx.instance.runtime_properties[constants.STORAGE_ACCOUNT_KEY] = storage_account
    
    # Caching the list of datadisks existing in the storage account
    blobs_name = []
    try:
        blobs_name = _get_datadisks_from_storage(ctx)
    except utils.WindowsAzureError as e:
        ctx.logger.debug('{} == 404: {}'.format(e.code, str(e.code) == '404'))
        if str(e.code) == '404' and 'ContainerNotFound' in e.message:
            ctx.logger.info('The container has not been found; it will be created.')
        else:
            raise e
        

    try:
        for disk in disks:
            json_VM = instance.get_json_from_azure()

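            # The LUN of the new disk is the current number of attached data
            # disks (this assumes existing LUNs are 0..n-1 with no gaps, which
            # holds when disks are only attached through this operation).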
            if 'dataDisks' in json_VM['properties']['storageProfile']:
                lun = len(
                          json_VM['properties']['storageProfile']['dataDisks']
                         )
            else:
                lun = 0
                json_VM['properties']['storageProfile']['dataDisks'] = []

            uri = "http://{}.blob.core.windows.net/{}-vhds/{}.vhd".format(
                        storage_account,
                        vm_name,
                        disk['name']
                        )

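            # createOption 'attach' reuses the existing VHD blob at `uri`,
            # while 'empty' asks Azure to create a new blank VHD of
            # diskSizeGB at that location.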
            if _is_datadisk_exists(blobs_name, disk['name']):
                ctx.logger.info(('Disk {} already exists, ' +
                                'trying to attach it.').format(disk['name']))
                createOption = 'attach'
            else:
                ctx.logger.info(('Disk {} does not exist, ' +
                                'creating it.').format(disk['name'])
                                )
                createOption = 'empty'

            json_disk = {"name": disk['name'], 
                         "diskSizeGB": disk['size'], 
                         "lun": lun,
                         "vhd": { "uri" : uri },
                         "caching": disk['caching'],
                         "createOption": createOption
                         }

            json_VM['properties']['storageProfile']['dataDisks'].append(
                                                                json_disk
                                                                )

            ctx.logger.info(('Attaching disk {} on lun {} ' + 
                             'and machine {}.').format(disk['name'],
                                                      lun,
                                                      vm_name
                                                      )
                            )
            connection.AzureConnectionClient().azure_put(
                        ctx,
                        ("subscriptions/{}/resourcegroups/{}/" +
                        "providers/Microsoft.Compute" +
                        "/virtualMachines/{}" +
                        "?validating=true&api-version={}").format(
                                   subscription_id,
                                   resource_group_name,
                                   vm_name,
                                   api_version
                       ),
                       json=json_VM
                       )

            utils.wait_status(ctx, 'instance', constants.SUCCEEDED)

    except utils.WindowsAzureError as e:
        # Do not interrupt the deployment if the maximum number of data disks
        # has been reached
        if "The maximum number of data disks" in e.message:
            ctx.logger.warning("{}".format(e.message))
        else:
            raise e
    except NonRecoverableError as e:
        ctx.logger.error(
            'The machine has failed; check whether the disks already exist.'
        )
        ctx.logger.info('Cancelling workflow.')
        raise e
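

# The storage helpers used above (_get_datadisks_from_storage and
# _is_datadisk_exists) are not shown in this excerpt. A minimal sketch of what
# _is_datadisk_exists is assumed to do, given that blobs_name holds the blob
# names returned from the storage account and that every disk blob is
# suffixed with '.vhd' (hypothetical illustration, not the module's actual
# code):
def _is_datadisk_exists_sketch(blobs_name, disk_name):
    """Return True if a blob named '<disk_name>.vhd' is already listed."""
    return '{}.vhd'.format(disk_name) in blobs_name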