Example #1
 def temp_list(self, args):
     templates_service = self.c.system_service().templates_service()
     try:
         if args.prefix is not None:
             search_prefix = 'name=' + args.prefix + '*'
         else:
             search_prefix = '*'
         templates = templates_service.list(search=search_prefix)
         rows = []
         for template in templates:
             cores = template.cpu.topology.cores * template.cpu.topology.sockets * template.cpu.topology.threads
             memory = template.memory // 1024 // 1024
             description = ""
             if template.description is not None:
                 description = template.description
             rows.append([
                 template.id, template.name,
                 str(memory) + ' M', cores,
                 str(description)
             ])
         output = Output(["ID", "Name", "Mem", "CPU", "Description"], rows)
         output.print_table()
         self.c.close()
     except sdk.Error as err:
         print "Failed to list templates, %s" % str(err)
Example #2
File: vm.py Project: cchen666/titamu
    def vm_show(self, args):
        vms_service = self.c.system_service().vms_service()
        try:
            rows = []

            vm = vms_service.list(search=args.vm_name)[0]
            vm_service = vms_service.vm_service(vm.id)
            # Print basic information
            rows.append(["Name", vm.name])
            rows.append(["ID", vm.id])
            rows.append(["Status", str(vm.status).upper()])
            rows.append(["Memory", str(vm.memory / 1024 / 1024) + 'M'])
            rows.append([
                "CPU", vm.cpu.topology.cores * vm.cpu.topology.sockets *
                vm.cpu.topology.threads
            ])
            # Print Disk information
            disk_attachments_service = vm_service.disk_attachments_service()
            disk_attachments = disk_attachments_service.list()
            for disk_attachment in disk_attachments:
                disk = self.c.follow_link(disk_attachment.disk)
                rows.append([
                    "Disks",
                    [
                        disk.name, disk.id,
                        str(disk.provisioned_size // 1024 // 1024 // 1024) + 'G'
                    ]
                ])

            # We use nics_service() instead of reported_devices_service()
            # because we also need each NIC's id and name
            nics = vm_service.nics_service().list()
            for nic in nics:
                for device in nic.reported_devices:
                    if device.ips is not None:
                        for ip in device.ips:
                            if ip.version.value == 'v4':
                                rows.append([
                                    "Active Nics",
                                    [
                                        nic.name, nic.mac.address, nic.id,
                                        ip.address
                                    ]
                                ])
                    else:
                        rows.append([
                            "Inactive Nics",
                            [nic.name, nic.mac.address, nic.id]
                        ])

            output = Output(["Item", "Value"], rows)
            output.print_table()
        except sdk.Error as err:
            print(str(err))
        except IndexError:
            print("Error: No such VM %s" % args.vm_name)
Example #3
    def forward(self, inputs):
        item_x, context_x, user_id = split_input(inputs, requires_grad=False,
                                                 dtype=[torch.long, torch.float, torch.long], device=self.device)

        batch_size = item_x.shape[0]

        (item_enc_hidden, ctxt_enc_last_hidden, norm_attn) = self.encode(item_x, context_x, user_id)
        global_preference = self.concat_to_item_tfm(item_enc_hidden)

        # calculate p(v | sequence) based on the global preference
        # (batch x num_item_units) times (|V| x num_item_units) -> (batch x |V|)
        item_logits = torch.einsum('bd,vd->bv', global_preference, self.item_embedding.weight)
        if not self.training:
            item_probs = item_logits.softmax(dim=-1)
            if self.lambda_score == 1:
                _, recommended_list = torch.topk(item_probs, self.recommendation_len, sorted=True)
            else:
                # Diversify with Maximal Marginal Relevance (MMR)
                init_preds = item_probs.argmax(dim=-1)
                recommended_list = init_preds.detach().view(batch_size, 1)
                for _ in range(self.recommendation_len - 1):
                    # calculate diversity using the embeddings of the given ids
                    selected = self.compute_distance(item_probs, recommended_list)
                    recommended_list = torch.cat([recommended_list, selected.view(batch_size, 1)], dim=-1)
        else:
            recommended_list = torch.zeros(1, device=self.device)

        # return as a tuple for the trainer. This will be split up in the loss function.
        recommended_list = recommended_list.requires_grad_(False)
        return Output(preds=recommended_list, logits=item_logits,
                      attention=norm_attn, context_logits=None, context_preds=None)
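
compute_distance is not shown in this example. Below is a minimal sketch of what such a Maximal Marginal Relevance selection step could look like, assuming it trades predicted relevance against embedding similarity to the items already chosen; the free-function signature, cosine metric, and weighting are assumptions, not the model's actual code:

import torch
import torch.nn.functional as F

def compute_distance(item_probs, recommended_list, item_embedding, lam=0.5):
    # item_probs: (batch, |V|) relevance scores
    # recommended_list: (batch, k) item ids selected so far
    # item_embedding: nn.Embedding over the item vocabulary
    all_items = F.normalize(item_embedding.weight, dim=-1)            # (|V|, d)
    selected = F.normalize(item_embedding(recommended_list), dim=-1)  # (batch, k, d)
    # cosine similarity of every candidate to each already-selected item
    sim = torch.einsum('vd,bkd->bkv', all_items, selected)            # (batch, k, |V|)
    max_sim = sim.max(dim=1).values                                   # (batch, |V|)
    # MMR score: relevance minus redundancy
    mmr = lam * item_probs - (1 - lam) * max_sim
    mmr = mmr.scatter(-1, recommended_list, float('-inf'))            # forbid repeats
    return mmr.argmax(dim=-1)                                         # (batch,)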
Example #4
File: net.py Project: cchen666/titamu
 def net_list(self, args):
     networks_service = self.c.system_service().networks_service()
     try:
         networks = networks_service.list()
         rows = []
         for network in networks:
             comment = ""
             description = ""
             if network.description is not None:
                 description = network.description
             if network.comment is not None:
                 comment = network.comment
             rows.append([network.id, network.name, comment, description])
         output = Output(["ID", "Name", "Comment", "Description"], rows)
         output.print_table()
         self.c.close()
     except sdk.Error as err:
         print "Failed to list networks, %s" % str(err)
Example #5
 def sd_list(self, args):
     try:
         sds_service = self.c.system_service().storage_domains_service()
         rows = []
         for sd in sds_service.list():
             if sd.available is not None and sd.used is not None:
                 rows.append([
                     sd.id, sd.name, sd.storage.type,
                     str((sd.available + sd.used) // 1024 // 1024 // 1024) + 'G',
                     str(sd.available // 1024 // 1024 // 1024) + 'G'
                 ])
             else:
                 rows.append(
                     [sd.id, sd.name, sd.storage.type, "N/A", "N/A"])
         output = Output(["ID", "Name", "Type", "Total", "Free"], rows)
         output.print_table()
     except sdk.Error as err:
         print "Failed to list disks, %s" % str(err)
Example #6
    def forward(self, inputs):
        item_x, _, _ = inputs
        lengths = (item_x != 0).sum(dim=-1)
        total_length = item_x.shape[1]
        batch_size = item_x.shape[0]
        mask = item_x.ne(0)

        # Look up embeddings
        item_x_embedded = self.item_embedding(item_x)
        item_x_embedded = dropout(item_x_embedded,
                                  p=self.dropout_rate,
                                  training=self.training)

        item_x = pack_padded_sequence(item_x_embedded,
                                      lengths,
                                      batch_first=True,
                                      enforce_sorted=False)

        # Run the GRU encoder over the packed sequence
        item_enc_all, item_enc_hidden = self.item_encoder_GRU(item_x)
        item_enc_all = pad_packed_sequence(item_enc_all,
                                           total_length=total_length,
                                           batch_first=True)
        item_enc_all = item_enc_all[0]  # element 0 holds the padded sequences, element 1 the lengths

        item_enc_all = dropout(item_enc_all,
                               p=self.dropout_rate,
                               training=self.training)
        item_enc_hidden = dropout(item_enc_hidden,
                                  p=self.dropout_rate,
                                  training=self.training)

        # Attention
        item_enc_hidden, attn, norm_attn = self.attention(
            item_enc_hidden.reshape(batch_size, -1).unsqueeze(1),
            item_enc_all,
            item_enc_all,
            mask=mask.unsqueeze(1))
        global_preference = self.hidden_to_item_tfm(item_enc_hidden).reshape(
            -1, self.num_item_units)
        item_logits = torch.einsum('bd,vd->bv', global_preference,
                                   self.item_embedding.weight)
        if not self.training:
            item_probs = item_logits.softmax(dim=-1)
            _, recommended_list = torch.topk(item_probs,
                                             self.recommendation_len,
                                             sorted=True)
        else:
            recommended_list = torch.zeros(1, device=self.device)
        recommended_list = recommended_list.requires_grad_(False)
        return Output(preds=recommended_list,
                      logits=item_logits,
                      attention=norm_attn.view(batch_size, -1),
                      context_logits=None,
                      context_preds=None)
Example #7
    def forward(self, inputs):
        item_x, context_x, user_id = split_input(
            inputs,
            requires_grad=False,
            dtype=[torch.long, torch.float, torch.long],
            device=self.device)
        batch_size = item_x.shape[0]
        (item_enc_hidden, ctxt_enc_hidden,
         norm_attn) = self.encode(item_x, context_x, user_id)

        context_predict_input = torch.cat((item_enc_hidden, ctxt_enc_hidden),
                                          dim=-1)
        context_logits = self.context_MLP(context_predict_input)
        context_probs = context_logits.softmax(dim=-1)
        context_preds = context_logits.argmax(dim=-1)

        context_y_embedded = torch.einsum('bc, cd -> bd', context_probs,
                                          self.context_embedding)
        context_y_embedded = dropout(context_y_embedded,
                                     p=self.dropout_rate,
                                     training=self.training)

        global_preference = self.bilinear_mapping(item_enc_hidden,
                                                  context_y_embedded)

        # calculate p(v | sequence) based on the global preference
        item_logits = torch.einsum('bd,vd->bv', global_preference,
                                   self.item_embedding.weight)

        if not self.training:
            item_probs = item_logits.softmax(dim=-1)
            recommended_list = []
            init_preds = item_probs.argmax(dim=-1)
            recommended_list.append(init_preds.detach())
            satisfactions = context_probs
            for _ in range(self.recommendation_len - 1):
                # calculate diversity using the embeddings of the given ids
                # selected = self.compute_distance(item_probs, recommended_list)
                selected, satisfactions = self.compute_highest_marginal_utility(
                    item_probs, satisfactions, recommended_list)
                recommended_list.append(selected)

            recommended_list = torch.stack(recommended_list, 1)
        else:
            recommended_list = torch.zeros(1, device=self.device)

        # return as a tuple for the trainer. This will be split up in the loss function.
        recommended_list = recommended_list.requires_grad_(False)
        context_preds = context_preds.requires_grad_(False)
        return Output(preds=recommended_list,
                      logits=item_logits,
                      attention=norm_attn,
                      context_logits=context_logits.view(batch_size, -1),
                      context_preds=context_preds)
Example #8
File: vm.py Project: cchen666/titamu
 def vm_list(self, args):
     # Call ovirtsdk to get vms_service
     vms_service = self.c.system_service().vms_service()
     # Check whether a name prefix was given; otherwise fall back to the
     # TITAMU_VM_PREFIX environment variable (empty string if unset)
     if args.prefix is not None:
         search_prefix = 'name=' + args.prefix + '*'
     else:
         search_prefix = 'name=' + environ.get('TITAMU_VM_PREFIX', '') + '*'
     try:
         # Get vm list by using vms_service
         vms = vms_service.list(
             search=search_prefix,
             case_sensitive=False,
         )
         rows = []
         for vm in vms:
             addr = ""
             comment = ""
             if vm.comment is not None:
                 comment = vm.comment
             vm_service = vms_service.vm_service(vm.id)
             # We use the reported_devices_service method to get all the
             # NICs that have an IP address assigned
             for device in vm_service.reported_devices_service().list():
                 if device.ips is not None and vm.status.value == 'up':
                     for ip in device.ips:
                         # cchen: ip.version is an enum, so we use "value" to
                         # read it; we assume that only IPs starting with
                         # '10' are of interest here
                         if ip.version.value == 'v4' and ip.address.startswith(
                                 '10'):
                             addr += ip.address + '  '
             rows.append(
                 [vm.id, vm.name,
                  vm.status.value.upper(), addr, comment])
         output = Output(["ID", "Name", "Status", "Networks", "Comment"],
                         rows)
         output.print_table()
         self.c.close()
     except sdk.Error as err:
         print "Failed to list VMs, %s" % str(err)
Example #9
File: disk.py Project: cchen666/titamu
    def disk_list(self, args):
        try:
            vms_service = self.c.system_service().vms_service()
            vm = vms_service.list(search=args.vm_name)[0]
            vm_service = vms_service.vm_service(vm.id)
            disk_attachments_service = vm_service.disk_attachments_service()
            disk_attachments = disk_attachments_service.list()
            rows = []
            for disk_attachment in disk_attachments:
                disk = self.c.follow_link(disk_attachment.disk)
                rows.append([
                    disk.id, disk.name,
                    str(disk.provisioned_size // 1024 // 1024 // 1024) + "G",
                    str(disk.status).upper()
                ])

            output = Output(["ID", "Name", "Size", "Status"], rows)
            output.print_table()
            self.c.close()
        except sdk.Error as err:
            print("Failed to list disks, %s" % str(err))
        except IndexError:
            print("Error: No such VM %s" % args.vm_name)
Example #10
    def forward(self, inputs):
        item_x, context_x, user_id = split_input(
            inputs,
            requires_grad=False,
            dtype=[torch.long, torch.float, torch.long],
            device=self.device)
        batch_size = item_x.shape[0]
        (item_enc_hidden, ctxt_enc_hidden,
         norm_attn) = self.encode(item_x, context_x, user_id)

        # user_embeddings = self.user_embedding(user_id)  # (batch x num_user_units)
        context_predict_input = torch.cat((item_enc_hidden, ctxt_enc_hidden),
                                          dim=-1)
        context_logits = self.context_MLP(context_predict_input)
        context_probs = context_logits.softmax(dim=-1)
        context_preds = context_logits.argmax(dim=-1)

        context_y_embedded = torch.einsum('bc, cd -> bd', context_probs,
                                          self.context_embedding)
        context_y_embedded = dropout(context_y_embedded,
                                     p=self.dropout_rate,
                                     training=self.training)

        global_preference = self.bilinear_mapping(item_enc_hidden,
                                                  context_y_embedded)

        # calculate p(v | sequence) based on the global preference
        item_logits = torch.einsum('bd,vd->bv', global_preference,
                                   self.item_embedding.weight)
        if not self.training:
            item_probs = item_logits.softmax(dim=-1)
            _, recommended_list = torch.topk(
                item_probs,
                self.recommendation_len,
                sorted=True,
            )
        else:
            recommended_list = torch.zeros(1, device=self.device)

        # return as a tuple for the trainer. This will be split up in the loss function.
        recommended_list = recommended_list.requires_grad_(False)
        context_preds = context_preds.requires_grad_(False)
        return Output(preds=recommended_list,
                      logits=item_logits,
                      attention=norm_attn,
                      context_logits=context_logits,
                      context_preds=context_preds)
Example #11
def make_storage_template():
    cft = CloudFormationTemplate(description="Refinery Platform storage")

    # Parameters
    cft.parameters.add(
        Parameter('StaticBucketName', 'String', {
            'Description': 'Name of S3 bucket for Django static files',
        }))
    cft.parameters.add(
        Parameter(
            'MediaBucketName',
            'String',
            {
                'Description':
                'Name of S3 bucket for Django media files',
                # make names DNS-compliant without periods (".") for
                # compatibility with virtual-hosted-style access and S3
                # Transfer Acceleration
                'AllowedPattern':
                r'[a-z0-9\-]+',
                'ConstraintDescription':
                'must only contain lower case letters, numbers, and '
                'hyphens',
            }))
    cft.parameters.add(
        Parameter(
            'IdentityPoolName', 'String', {
                'Default': 'Refinery Platform',
                'Description': 'Name of Cognito identity pool for S3 uploads',
            }))
    cft.parameters.add(
        Parameter(
            'DeveloperProviderName', 'String', {
                'Default':
                'login.refinery',
                'Description':
                '"domain" by which Cognito will refer to users',
                'AllowedPattern':
                r'[a-z\-\.]+',
                'ConstraintDescription':
                'must only contain lower case letters, periods, '
                'and hyphens'
            }))

    # Resources
    cft.resources.add(
        Resource(
            'StaticStorageBucket',
            'AWS::S3::Bucket',
            Properties({
                'BucketName': ref('StaticBucketName'),
                'AccessControl': 'PublicRead',
                'CorsConfiguration': {
                    'CorsRules': [{
                        'AllowedOrigins': ['*'],
                        'AllowedMethods': ['GET'],
                        'AllowedHeaders': ['Authorization'],
                        'MaxAge': 3000,
                    }]
                },
            }),
            DeletionPolicy('Retain'),
        ))
    cft.resources.add(
        Resource(
            'MediaStorageBucket',
            'AWS::S3::Bucket',
            Properties({
                'BucketName': ref('MediaBucketName'),
                'AccessControl': 'PublicRead',
                'CorsConfiguration': {
                    'CorsRules': [{
                        'AllowedOrigins': ['*'],
                        'AllowedMethods': ['POST', 'PUT', 'DELETE'],
                        'AllowedHeaders': ['*'],
                        'ExposedHeaders': ['ETag'],
                        'MaxAge': 3000,
                    }]
                }
            }),
            DeletionPolicy('Retain'),
        ))
    # Cognito Identity Pool for Developer Authenticated Identities Authflow
    # http://docs.aws.amazon.com/cognito/latest/developerguide/authentication-flow.html
    cft.resources.add(
        Resource(
            'IdentityPool', 'AWS::Cognito::IdentityPool',
            Properties({
                'IdentityPoolName': ref('IdentityPoolName'),
                'AllowUnauthenticatedIdentities': False,
                'DeveloperProviderName': ref('DeveloperProviderName'),
            })))
    cft.resources.add(
        Resource(
            'IdentityPoolAuthenticatedRole',
            'AWS::Cognito::IdentityPoolRoleAttachment',
            Properties({
                'IdentityPoolId': ref('IdentityPool'),
                'Roles': {
                    'authenticated': get_att('CognitoS3UploadRole', 'Arn'),
                }
            })))
    upload_role_trust_policy = {
        "Version":
        "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {
                "Federated": "cognito-identity.amazonaws.com"
            },
            "Action": "sts:AssumeRoleWithWebIdentity",
            "Condition": {
                "StringEquals": {
                    "cognito-identity.amazonaws.com:aud": ref('IdentityPool')
                },
                "ForAnyValue:StringLike": {
                    "cognito-identity.amazonaws.com:amr": "authenticated"
                }
            }
        }]
    }
    upload_access_policy = {
        "Version":
        "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Action": ["cognito-identity:*"],
            "Resource": "*"
        }, {
            "Action": ["s3:PutObject", "s3:AbortMultipartUpload"],
            "Effect": "Allow",
            "Resource": {
                "Fn::Sub":
                "arn:aws:s3:::${MediaStorageBucket}/uploads/"
                "${!cognito-identity.amazonaws.com:sub}/*"
            }
        }]
    }
    cft.resources.add(
        Resource(
            'CognitoS3UploadRole', 'AWS::IAM::Role',
            Properties({
                'AssumeRolePolicyDocument':
                upload_role_trust_policy,
                'Policies': [{
                    'PolicyName': 'AuthenticatedS3UploadPolicy',
                    'PolicyDocument': upload_access_policy,
                }]
            })))

    # Outputs
    cft.outputs.add(
        Output('IdentityPoolId', ref('IdentityPool'),
               {'Fn::Sub': '${AWS::StackName}IdentityPoolId'},
               'Cognito identity pool ID'))

    return cft
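
The identity pool above backs the Developer Authenticated Identities flow linked in the comment. A rough sketch of the server-side token vending that flow implies, using boto3; the region, pool id, and user identifier are placeholders, and the provider name must match the DeveloperProviderName parameter ('login.refinery' by default):

import boto3

cognito = boto3.client('cognito-identity', region_name='us-east-1')

# Vend an OpenID token for a user our own backend has already authenticated
response = cognito.get_open_id_token_for_developer_identity(
    IdentityPoolId='us-east-1:00000000-0000-0000-0000-000000000000',
    Logins={'login.refinery': 'user-123'},
)

# The client exchanges response['IdentityId'] and response['Token'] for
# temporary credentials, which CognitoS3UploadRole scopes to the user's
# own uploads/ prefix in the media bucket.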
Example #12
def make_storage_template():
    cft = CloudFormationTemplate(description="Refinery Platform storage")
    # Parameters
    cft.parameters.add(
        Parameter('StaticBucketName', 'String', {
            'Description': 'Name of S3 bucket for Django static files',
        }))
    cft.parameters.add(
        Parameter(
            'MediaBucketName',
            'String',
            {
                'Description':
                'Name of S3 bucket for Django media files',
                # make names DNS-compliant without periods (".") for compatibility
                # with virtual-hosted-style access and S3 Transfer Acceleration
                'AllowedPattern':
                r'[a-z0-9\-]+',
                'ConstraintDescription':
                'must only contain lower case letters, numbers, and hyphens',
            }))
    # Resources
    cft.resources.add(
        Resource(
            'StaticStorageBucket',
            'AWS::S3::Bucket',
            Properties({
                'BucketName': ref('StaticBucketName'),
                'AccessControl': 'PublicRead',
                'CorsConfiguration': {
                    'CorsRules': [{
                        'AllowedOrigins': ['*'],
                        'AllowedMethods': ['GET'],
                        'AllowedHeaders': ['Authorization'],
                        'MaxAge': 3000,
                    }]
                },
            }),
            DeletionPolicy('Retain'),
        ))
    cft.resources.add(
        Resource(
            'MediaStorageBucket',
            'AWS::S3::Bucket',
            Properties({
                'BucketName': ref('MediaBucketName'),
                'AccessControl': 'PublicRead',
                'CorsConfiguration': {
                    'CorsRules': [{
                        'AllowedOrigins': ['*'],
                        'AllowedMethods': ['POST', 'PUT', 'DELETE'],
                        'AllowedHeaders': ['*'],
                        'ExposedHeaders': ['ETag'],
                        'MaxAge': 3000,
                    }]
                }
            }),
            DeletionPolicy('Retain'),
        ))
    cft.outputs.add(
        Output('MediaBucketName', ref('MediaStorageBucket'),
               {'Fn::Sub': '${AWS::StackName}Media'},
               'Name of S3 bucket for Django media files'))

    return cft
Example #13
#!/usr/bin/env python

import argparse

from utils import Output, is_valid_user, arguments
from classes import Model

parser = argparse.ArgumentParser(description='Performs a data modeling pipeline',
                                 usage='python model.py user password [OPTIONS]')

input_username, input_password, db_name, db_host = arguments(parser)

output = Output()


def model_data():
    model = Model(input_username, input_password, db_name, db_host)

    # Create course content similarity DataFrame
    output.write('Create a course content similarity DataFrame')
    output.warning('This process can take a long time')
    output.start_spinner('Creating a course content similarity DataFrame')

    try:
        model.create_course_content_similarity_df()
        output.spinner_success()
    except Exception as err:
        output.spinner_fail(str(err))
        exit(1)

    # Save courses content similarities to database