Example No. 1
def run():
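    # Set up one Exoscale and one AWS provider, generate a throwaway RSA/SSH
    # keypair, and run the benchmark threads (the AWS run is commented out).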
    config_exo = {
        'access': 'EXO84e091ac6a22bf8fb7b916f2',
        'secret': '-DU8_52ivubZb0-hdrx788GKtbTjw7r32LUOlFKV_VA'
    }

    config_aws = {'access': '', 'secret': ''}

    provider_exo = Provider(name='exoscale', config=config_exo, region='')
    provider_aws = Provider(name='aws', config=config_aws, region='us-east-1')
    semaphore = threading.BoundedSemaphore(2)

    instance_exo = Instance(provider_exo, 'test', 'Medium',
                            'Linux Ubuntu 18.04 LTS 64-bit')
    instance_aws = Instance(provider_aws, 'COCA-BENCH', 't2.micro',
                            'ami-0a313d6098716f372')

    key = RSA.generate(2048)
    keypair = (key.exportKey('PEM'), key.publickey().exportKey('OpenSSH'))

    benchmark_exo = Benchmark(instance_exo, keypair, semaphore)
    # benchmark_aws = Benchmark(instance_aws, keypair, semaphore)

    print "start tests"
    benchmark_exo.start()
    # benchmark_aws.start()
    print "wait threads"
    benchmark_exo.join()
    # benchmark_aws.join()
    print "end"
Example No. 2
def senToInstan(sentence=None, segSentence2=None, segSentenceNumG=None,
                e1_list=None, e2_list=None, ListInstance=None):
    # sentence = S
    e1List = e1_list
    e2List = e2_list
    for e1 in e1List:
        for e2 in e2List:
            f = 0
            # e1 comes before e2
            if e1["index1"] <= e2["index2"]:
                for word in segSentenceNumG[e1["index1"] + 1:e2["index2"]]:
                    # count sentence delimiters to check whether the two
                    # entities lie in the same (or an adjacent) sentence
                    if word in ['.', '。', ';', ';']:
                        f = f + 1
                        if f >= 2:
                            break
                if f <= 1:
                    # add an instance
                    instance = Instance(sentence=sentence, segSentence2=segSentence2,
                                        segSentenceNumG=segSentenceNumG,
                                        e1=e1, e2=e2, flag=0)
                    ListInstance.append(instance)
            # e1 comes after e2
            if e1["index1"] > e2["index2"]:
                for word in segSentenceNumG[e2["index2"] + 1:e1["index1"]]:
                    if word in ['.', '。', ';', ';']:
                        f = f + 1
                        if f >= 2:
                            break
                if f <= 1:
                    instance = Instance(sentence=sentence, segSentence2=segSentence2,
                                        segSentenceNumG=segSentenceNumG,
                                        e1=e1, e2=e2, flag=1)
                    ListInstance.append(instance)
    return ListInstance
Example No. 3
    def get_instances(self, folder):

        instances = []
        labels = set()
        for author in os.listdir(folder):
            path = folder + "/" + author + "/agree-sent/"
            path_pos = folder + "/" + author + "/pos/"
            if os.path.exists(path) and os.path.exists(path_pos):
                for af in os.listdir(path):
                    current = os.path.join(path, af)
                    current_pos = os.path.join(
                        path_pos,
                        af.split('.')[0] + '.sent.okpuncs.props.pos')
                    if os.path.isfile(current) and os.path.isfile(current_pos):
                        # open in text mode so the "@"-separated fields parse as str
                        with open(current_pos) as pos_file:
                            pos_data = pos_file.readlines()
                        with open(current) as agree_data:
                            for x in agree_data:
                                x = x.strip()
                                sent_id = int(x.split("@")[0])
                                y = pos_data[sent_id].strip()
                                label = int(x.split("@")[1])
                                text = x.split("@")[2]
                                inst = Instance(text, label)
                                # y looks like "(TAG token):(TAG token):..."
                                for tagtoken in y.split("):("):
                                    tag = tagtoken.split(" ")[0].lstrip("(")
                                    token = tagtoken.split(" ")[1]
                                    inst.add_token(Token(token, tag))
                                instances.append(inst)
                                labels.add(label)

        return instances, labels
Example No. 4
 def get_instances(self, label_file, xml_file):
     instances = []
     labels_final = set()
     # load the nltk perceptron just once to speed up tagging
     tagger = PerceptronTagger()
     labels_dict = {
         0: "anger",
         1: "disgust",
         2: "fear",
         3: "joy",
         4: "sadness",
         5: "surprise"
     }
     tree = ET.parse(xml_file)
     root = tree.getroot()
     with open(label_file) as f:
         for sent, line in zip(root, f):
             id_xml = list(sent.attrib.values())[0]
             id_labels = line.rstrip().split()
             id_file = id_labels[0]
             if id_xml == id_file:
                 for i in sent.itertext():
                     text = i
                 labels = id_labels[1:]
                 # index of the highest-scoring emotion
                 label = labels.index(str(max(int(l) for l in labels)))
                 inst = Instance(text, labels_dict[label])
                 inst_tokenized = word_tokenize(text)
                 inst_tagged = tagger.tag(inst_tokenized)
                 for tokentag in inst_tagged:
                     token = Token(tokentag[0], tokentag[1])
                     inst.add_token(token)
                 instances.append(inst)
                 labels_final.add(label)
         return instances, labels_final
Example No. 5
    def new(self, params):
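        # Persist params, write the sample FASTA, simulate reads with ART
        # (binary located via the ART_ILLUMINA env var), then build the
        # Instance and dump its JSON/text/stats views.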
        os.makedirs(params.out_dir, exist_ok=True)

        self.params = params
        w.write(params.out_dir + "/params.json",
                w.pretty_json(params.to_json()))

        self.sample = Sample().new(params)
        sample_file = params.out_dir + "/sample.fasta"
        w.write(sample_file, w.fasta(self.sample))

        art_prefix = params.out_dir + "/art"
        art = os.environ['ART_ILLUMINA']
        subprocess.run(
            [art, "--in", sample_file, "--out", art_prefix,
             "--rndSeed", str(params.seed)] + params.art_flags,
            stdout=subprocess.DEVNULL)
        self.art_output = r.read(art_prefix + ".aln", r.aln(params.take_ref))

        self.instance = Instance().new(params, self.art_output)
        w.write(params.out_dir + "/instance.json",
                w.json(self.instance.to_json()))
        w.write(params.out_dir + "/instance.txt",
                w.text(self.instance.to_text()))
        w.write(params.out_dir + "/instance.stats.json",
                w.json(self.instance.stats()))

        return self
Example No. 6
    def new(self, root_dir, strains, aligned, sample):
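        # Write the sample FASTA, simulate reads with ART, then build the
        # Instance and dump its text and stats views.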
        out_dir = root_dir + "/" + sample[0]

        os.makedirs(out_dir, exist_ok=True)

        # sample_json = out_dir + "/sample.json"
        # w.write(sample_json, w.json(s.to_json(sample)))

        sample_fasta = out_dir + "/sample.fasta"
        w.write(sample_fasta, w.fasta(s.to_fasta(sample)))

        art_prefix = out_dir + "/art"
        art = os.environ['ART_ILLUMINA']
        subprocess.run(
            [art, "--in", sample_fasta, "--out", art_prefix,
             "--seqSys", "HS20", "--len", "100", "--fcov", "100"],
            stdout=subprocess.DEVNULL)
        take_ref = False
        art_output = r.read(art_prefix + ".aln", r.aln(take_ref))

        instance = Instance().new(strains, aligned, art_output)
        # w.write(out_dir + "/instance.json", w.json(instance.to_json()))
        w.write(out_dir + "/instance.txt", w.text(instance.to_text()))
        w.write(out_dir + "/instance.stats.json", w.json(instance.stats()))
Example No. 7
 def __init__(self, name, ind_radius=0, ind_k=0, path=PATH):
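     # Start from a single sensor (node 0 at the origin); the neighbor graph
     # and target-coverage map begin empty.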
     Instance.__init__(self, name, ind_radius, ind_k, path)
     self.sensors = nx.Graph()
     self.sensors.add_node(0)
     self.neighbors = nx.Graph()
     self.sensors_sorted = [[0, [0., 0.]]]
     self.target_coverage = defaultdict(list)
Example No. 8
def createInstanceObjectList(processed_dataset):
    '''
    Creates a list of Instance objects from the tokenized input + label
    '''
    print('Reading instances...')
    instanceObjects = []

    #Els: read in tokenised lines
    for item in processed_dataset:
        line = item[1]
        data = line.strip('\n')
        if not data:
            continue  # skip empty lines; 'content' would otherwise be undefined
        all_words = word_tokenize(data)
        content = ' '.join(str(elem) for elem in all_words)
        label = item[2]
        instanceObject = Instance(content, label)
        for i, token in enumerate(content.split()):
            instanceObject.tokenDictionary[i + 1] = Token(token)
        if FeatureSelection.getInstance(featureFile).normalizeInstances:
            instanceObject.tokenDictionary = instanceObject.normalizeTokens()
        instanceObjects.append(instanceObject)
    return instanceObjects
Example No. 9
def get_annotations_for_sentence(tokens):
    """
    Return entity extents, in (start, end, tagtype) token positions
    """
    instances = []
    current_tag = None
    extent_start = 0
    ext_char_start = 0
    previous_end = 0
    # each element of tokens is (token_text, (char_start, char_end), bio_tag)
    for i, (token, (start, end), tag) in enumerate(tokens):
        if tag[0] == "B":
            if current_tag:
                # close the extent that was open before this new "B" tag
                instance = Instance(ext_char_start, previous_end, current_tag,
                                    " ".join(t[0] for t in tokens[extent_start:i]))
                instances.append(instance)
            current_tag = tag[2:]
            extent_start = i
            ext_char_start = start
        elif tag[0] == "I":
            pass
        elif tag[0] == "O":
            if current_tag:
                instance = Instance(ext_char_start, previous_end, current_tag,
                                    " ".join(t[0] for t in tokens[extent_start:i]))
                instances.append(instance)
                current_tag = None
        previous_end = end
    if current_tag:
        # flush an extent that runs to the end of the sentence
        instance = Instance(ext_char_start, end, current_tag,
                            " ".join(t[0] for t in tokens[extent_start:i + 1]))
        instances.append(instance)
    instances.sort()
    return instances
Example No. 10
 def get_instances(self, folder):
     # "hp" (happiness) is mapped onto "joy" here
     labels_dict = {
         "hp": "joy",
         "sd": "sadness",
         "ag": "anger",
         "dg": "disgust",
         "sp": "surprise",
         "fr": "fear"
     }
     instances = []
     labels = set()
     # load the nltk perceptron just once to speed up tagging
     tagger = PerceptronTagger()
     with open(folder) as f:
         for line in f:
             # split on the first two spaces only: label, id, text
             label, id, text = line.strip().split(" ", 2)
             if label == "ne":  # ignore "no emotion"
                 continue
             inst = Instance(text, labels_dict[label])
             inst_tokenized = word_tokenize(text)
             inst_tagged = tagger.tag(inst_tokenized)
             for tokentag in inst_tagged:
                 token = Token(tokentag[0], tokentag[1])
                 inst.add_token(token)
             instances.append(inst)
             labels.add(label)
     return instances, labels
Example No. 11
    def extractFeatures(self):

        print("extracting features...")
        self.buildDicts()
        print("reading Author.csv...")
        with open('/home/bhanu/Downloads/dataRev2/Author.csv',
                  newline='') as csvfile:
            authorReader = csv.reader(csvfile)
            next(authorReader)  # skip the header row

            # start reading authors and build their features
            for row in authorReader:
                instance = Instance()
                authorid = row[0]
                instance.id = authorid

                # extract feature words corresponding to author name
                namestr = row[1].split(' ')
                for w in namestr:
                    pw = self.preprocess(w)
                    wid = self.getFeatureId(pw)
                    instance.name.add(wid)

                # extract keywords corresponding to author's affiliation
                for string in row[2:]:
                    nwords = string.split(' ')
                    for w in nwords:
                        pw = self.preprocess(w)
                        wid = self.getFeatureId(pw)
                        instance.affiliations.add(wid)

                # extract features corresponding to co-authors of this author
                paperids = self.author_paperDict.get(authorid)
                if paperids is not None:
                    for pid in paperids:
                        # add all of the paper's authors to the co-authors set
                        for a in self.paper_authorDict[pid]:
                            aid = self.getFeatureId("author" + a)
                            instance.co_authors.add(aid)
                    instance.co_authors.remove(
                        self.getFeatureId("author" + authorid))
                else:
                    # author has no papers; data may be missing
                    instance.hasNoPapers = True
                    self.authors_noPapers += 1

                self.instanceList.append(instance)

        with open('instanceList.obj', 'wb') as dumpfile:
            pickle.dump(self.instanceList, dumpfile,
                        protocol=pickle.HIGHEST_PROTOCOL)
        with open('featureDict.obj', 'wb') as dumpfile:
            pickle.dump(self.featDict, dumpfile,
                        protocol=pickle.HIGHEST_PROTOCOL)

        print("finished extracting features.")
Example No. 12
 def load(self, out_dir):
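     # Reload a previously generated run from out_dir: params, sample FASTA,
     # ART alignment, and the serialized Instance.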
     self.params = Params().from_json(
         r.read(out_dir + "/params.json", r.json))
     self.sample = Sample().from_fasta(
         r.read(out_dir + "/sample.fasta", r.fasta_list))
     self.art_output = r.read(out_dir + "/art.aln",
                              r.aln(self.params.take_ref))
     self.instance = Instance().from_json(
         r.read(out_dir + "/instance.json", r.json))
     return self
Example No. 13
 def update_tags_from_instance(self, instance_id):
     try:
         inst = Instance()
         inst.get_instance_details(instance_id)
         volume_ids = self.list_volumes_from_instances(instance_id)
         boot_vol_ids = self.list_boot_volumes_from_instances(instance_id)
         for vol_id in volume_ids:
             self.update_backup_tags_from_volume(vol_id)
         for boot_vol_id in boot_vol_ids:
             self.update_backup_tags_from_boot_volume(boot_vol_id)
     except Exception:
         # tag updates are best-effort; any failure is deliberately ignored
         pass
Example No. 14
def runInstance(job):
    global num_running_jobs

    # track the number of concurrently running jobs under the mutex
    with mutex:
        num_running_jobs += 1

    instance = Instance(job)
    instance.run()

    with mutex:
        num_running_jobs -= 1
Example No. 15
    def get_instances(self):
        instancesList = list()

        if len(self.latency) == 1:
            # single latency value: slice just that one second
            second = self.latency[0]
            sliced_data = self.get_time_channel_slice(None, second, second + 1)
            instance = Instance(self.patient_id, second, sliced_data,
                                self.sampling_rate, self.number_of_channels)
            instancesList.append(instance)
        else:
            # one instance per one-second window (the last entry is excluded)
            for second in self.latency[0:-1]:
                sliced_data = self.get_time_channel_slice(None, second, second + 1)
                instance = Instance(self.patient_id, second, sliced_data,
                                    self.sampling_rate, self.number_of_channels)
                instancesList.append(instance)

        return instancesList
Example No. 16
 def __init__(self, compartment_id=None):
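     # Per-compartment bookkeeping for volume attachments, backups, and
     # instance/volume tags; presumably populated by initialize().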
     self.volume_attachments = list()
     self.volume_backups = list()
     self.volume_backups_volume = UniqueKeyDict()
     self.attached_volume = UniqueKeyDict()
     self.instance_tags = UniqueKeyDict()
     self.volume_tags = UniqueKeyDict()
     self.compartment_id = compartment_id
     self.instanceObj = Instance()
     self.volumeObj = Volume()
     self.compartment_list = list()
     self.compartment_obj = Compartment()
     self.initialize()
     logging.info("VERSION 1.1")
Example No. 17
    def _createInstance(self, line):
        """<method internal="yes"/>
        """
        params = {}
        splitted_line = line.split(' -- ')
        zorp_argv = splitted_line[0]

        try:
            arg = self.ZORPCTLCONF['ZORP_APPEND_ARGS']
            if arg:
                zorp_argv += " %s" % arg
        except KeyError:
            pass

        params['name'] = zorp_argv.split()[0]
        params['zorp_argv'] = " ".join(zorp_argv.split()[1:])

        zorpctl_argv = splitted_line[1] if len(splitted_line) > 1 else ""

        try:
            arg = self.ZORPCTLCONF['ZORPCTL_APPEND_ARGS']
            if arg:
                zorpctl_argv += " %s" % arg
        except KeyError:
            pass

        if zorpctl_argv:
            params.update(self._parseZorpctlArgs(zorpctl_argv))

        return Instance(**params)
Example No. 18
 def updateHAInstance(self):
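     # Rebuild the HAInstance table from the controller's current instance list.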
     # self.clearlog()
     instance_list = self.getInstanceFromController()
     HAInstance.init()
     for instance in instance_list[:]:
         # [self.id, self.name, self.host, self.status, self.network]
         vm = Instance(ha_instance=instance)
         HAInstance.addInstance(vm)
Example No. 19
def createInstanceObjectList(inputFileName):
    '''
    Creates a list of Instance objects from the input file, which contain all the (linguistic) information
    needed to extract the features for sentiment polarity classification
    '''
    print('Reading instances...')
    instanceObjects = []
    with codecs.open(inputFileName, 'r', 'utf8') as inputFile:
        for line in inputFile:
            content, label = line.strip().split('\t')
            instanceObject = Instance(content, label)
            for i, token in enumerate(content.split()):
                instanceObject.tokenDictionary[i + 1] = Token(token)
            if FeatureSelection.getInstance(featureFile).normalizeInstances:
                instanceObject.tokenDictionary = instanceObject.normalizeTokens()
            instanceObjects.append(instanceObject)
    return instanceObjects
Example No. 20
    def add_instance(cluster_id, instance):
        """Wrap an HA instance in a VM object and register it with the cluster.

        :param cluster_id: id of the cluster the instance joins
        :param instance: instance data to wrap
        """
        print("add vm")
        vm = Instance(cluster_id=cluster_id, ha_instance=instance)
        HAInstance.instance_list.append(vm)
Example No. 21
 def getWorkflowInstancesData(self, workflow_id):
     wf = [w for w in self.workflows if str(w.id) == str(workflow_id)]
     result = {}
     if wf:
         wf = wf[0]
         result["colnames"] = Instance.getTableNames()
         result["rows"] = [inst.getTableData() for inst in wf.instances]
         result["numrows"] = len(result["rows"])
     return result
Example No. 22
    def req_create(self, request):

        try:
            document = minidom.parseString(request["data"])
            rootNode = document.documentElement

            if rootNode.nodeName != "vm":
                raise Exception("invalid root node")

        except Exception:
            Logger.warn("Invalid xml input !!")
            doc = Document()
            rootNode = doc.createElement('error')
            rootNode.setAttribute("id", "name")
            doc.appendChild(rootNode)
            return self.req_answer(doc)

        ram = rootNode.getAttribute("ram")
        vcpu = rootNode.getAttribute("vcpu")
        master = rootNode.getAttribute("master")

        if master not in self.role_instance.pool.masters:
            return False

        obj_master = self.role_instance.pool.masters[master]

        new_id = self.role_instance.pool.get_last_instance(master)

        id_tmp = b36.b362int(new_id) + 1

        name = b36.int2b36(id_tmp, 4)

        instance = Instance(obj_master, name, self.role_instance.pool,
                            self.role_instance.virt_co)

        ret = self.role_instance.create_vm(instance, ram, vcpu)

        doc = Document()
        rootNode = doc.createElement("vm")
        rootNode.setAttribute("name", instance.get_name())
        rootNode.setAttribute("status", ret)
        doc.appendChild(rootNode)

        return self.req_answer(doc)
Example No. 23
def main():
    inst_types = ['small','medium','large','xl']
    n_inst = ['0','1','2','3','4','5','6','7','8','9']

    # Run the three solution methods directly on all 40 instances.
    results = pd.DataFrame()
    for t in inst_types:
        for n in n_inst:
            inst_file = 'input/' + t + '_' + n + '.csv'
            inst = Instance(inst_file)

            # Greedy solution.
            start_time = time.time()
            f_greedy, x_greedy, ratio_greedy = solve_instance_greedy(inst)
            greedy_time = time.time() - start_time

            # LP (matching) solution.
            start_time = time.time()
            f_lp, x_lp, ratio_lp = solve_instance_lp(inst)
            lp_time = time.time() - start_time

            # Alternative LP solution.
            start_time = time.time()
            f_lp_alt, x_lp_alt, ratio_alt = solve_instance_lp_alternativa(inst)
            lp_alt_time = time.time() - start_time

            # Store each algorithm's solution for this instance.
            inst_sol = pd.concat([pd.Series(t), pd.Series(n),
                                  pd.Series(f_greedy), pd.Series(ratio_greedy), pd.Series(greedy_time),
                                  pd.Series(f_lp)    , pd.Series(ratio_lp)    , pd.Series(lp_time),
                                  pd.Series(f_lp_alt), pd.Series(ratio_alt)   , pd.Series(lp_alt_time)], axis=1)

            results = pd.concat([results, inst_sol], axis = 0, ignore_index = True)

    # Consolidate the overall results and compute metrics.
    results.columns = ['TipoInstancia', 'NroInstancia',
                       'SolucionGreedy', 'RatioGreedy', 'TiempoGreedy',
                       'SolucionMatching', 'RatioMatching', 'TiempoMatching',
                       'SolucionAlternativa', 'RatioAlternativa', 'TiempoAlternativa']

    results['GAP_dist_lp_gr']   = round((results['SolucionMatching']    / results['SolucionGreedy'] - 1),2)
    results['GAP_ratio_lp_gr']  = round((results['RatioMatching']       / results['RatioGreedy'] - 1), 2)
    results['GAP_time_lp_gr']   = round((results['TiempoMatching']      / results['TiempoGreedy'] - 1), 2)
    results['GAP_dist_alt_gr']  = round((results['SolucionAlternativa'] / results['SolucionGreedy'] - 1),2)
    results['GAP_ratio_alt_gr'] = round((results['RatioAlternativa']    / results['RatioGreedy'] - 1), 2)
    results['GAP_time_alt_gr']  = round((results['TiempoAlternativa']   / results['TiempoGreedy'] - 1), 2)
    results['GAP_dist_alt_lp']  = round((results['SolucionAlternativa'] / results['SolucionMatching'] - 1),2)
    results['GAP_ratio_alt_lp'] = round((results['RatioAlternativa']    / results['RatioMatching'] - 1), 2)
    results['GAP_time_alt_lp']  = round((results['TiempoAlternativa']   / results['TiempoMatching'] - 1), 2)

    # Export the results.
    path = 'C:/Repo/Github/combinatorial_optimization/taxi_matching/'
    results.to_csv(path + 'results.csv')
    results.groupby('TipoInstancia').mean().to_csv(path + 'results_grouped.csv')
Example No. 24
 def __init__(self, instanceName, timeLimit=1800, verbose=True):
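     # Model and x/w/y variable-group placeholders; presumably filled in when
     # the integer program is built and solved.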
     self._model = None
     self._modelVars_x = None
     self._modelVars_w = None
     self._modelVars_y = None
     self._modelSolution = None
     self._instance = Instance(instanceName)
     self._timeLimit = timeLimit
     self._verbose = verbose
     self._stats = Statistics(instanceName, "Integer Programming")
Example No. 25
 def init_task_processor(self, name, nr_cpus, mem, disk_space):
     # Googlefy instance name
     name = self.__format_instance_name(name)
     # Return a processor object with given resource requirements
     instance_config = self.__get_instance_config()
     # Create and return processor
     if self.is_preemptible:
         return PreemptibleInstance(name, nr_cpus, mem, disk_space,
                                    **instance_config)
     else:
         return Instance(name, nr_cpus, mem, disk_space, **instance_config)
Example No. 26
def get_instances(members_names, instances_list):
    instances = []
    for a_name in members_names:
        #an_id=os.popen("nova list | grep " + a_name + " | awk '{print $2}'").read().split("\n")[0]
        an_id = get_object_id(instances_list, a_name)
        instance = Instance(a_name, an_id)
        #print instance.__dict__
        instances.append(instance)

    return instances
Example No. 27
 def addInstance(self, instance_id):
     if not self.checkInstanceExist(instance_id):
         return Response(code="failed",
                         message="instance %s doesn't exist" % instance_id,
                         data=None)
     elif not self.checkInstanceBootFromVolume(instance_id):
         return Response(code="failed",
                         message="instance %s is not booted from volume" %
                         instance_id,
                         data=None)
     elif not self.checkInstancePowerOn(instance_id):
         return Response(code="failed",
                         message="instance %s is not powered on" %
                         instance_id,
                         data=None)
     else:
         try:
             # Live migration VM to cluster node
             final_host = self.checkInstanceHost(instance_id)
             if final_host is None:
                 final_host = self.liveMigrateInstance(instance_id)
             instance = Instance(
                 id=instance_id,
                 name=self.nova_client.getInstanceName(instance_id),
                 host=final_host,
                 status=self.nova_client.getInstanceState(instance_id),
                 network=self.nova_client.getInstanceNetwork(instance_id))
             self.sendUpdateInstance(final_host)
             self.instance_list.append(instance)
             message = "Cluster--Cluster add instance success ! The instance id is %s." % (
                 instance_id)
             logging.info(message)
             # result = {"code":"0","cluster id":self.id,"node":final_host,"instance id":instance_id,"message":message}
             result = Response(code="succeed",
                               message=message,
                               data={
                                   "cluster id": self.id,
                                   "node": final_host,
                                   "instance id": instance_id
                               })
         except Exception as e:
             print(str(e))
             message = "Cluster--Cluster add instance failed, please check again! The instance id is %s." % (
                 instance_id)
             logging.error(message)
             # result = {"code":"1","cluster id":self.id,"instance id":instance_id,"message":message}
             result = Response(code="failed",
                               message=message,
                               data={
                                   "cluster id": self.id,
                                   "instance id": instance_id
                               })
         finally:
             return result
Example No. 28
 def setUp(self):
     # Key/door maze fixtures: '@' marks the start, lowercase letters are keys,
     # uppercase letters are doors (the maps match the AoC 2019 day 18 examples).
     self.graph1 = createGraphFromString("#########\n#b.A.@.a#\n#########\n")
     self.graph2 = createGraphFromString("########################\n#f.D.E.e.C.b.A.@.a.B.c.#\n######################.#\n#d.....................#\n########################\n")
     self.graph3 = createGraphFromString("########################\n#...............b.C.D.f#\n#.######################\n#e.F..c..@..d.B.A.b.g#\n########################\n")
     self.graph4 = createGraphFromString("#################\n#i.G..c...e..H.p#\n########.########\n#j.A..b...f..D.o#\n########@########\n#k.E..a...g..B.n#\n########.########\n#l.F..d...h..C.m#\n#################\n")
     self.graph5 = createGraphFromString("########################\n#@..............ac.GI.b#\n###d#e#f################\n###A#B#C################\n###g#h#i################\n########################\n")
     self.instance1 = Instance(self.graph1)
     self.instance1.updateTokens()
     self.instance2 = Instance(self.graph2)
     self.instance2.updateTokens()
     self.instance3 = Instance(self.graph3)
     self.instance3.updateTokens()
     self.instance4 = Instance(self.graph4)
     self.instance4.updateTokens()
     self.instance5 = Instance(self.graph5)
     self.instance5.updateTokens()
     self.instance1.listPositions()
     self.instance2.listPositions()
     self.instance3.listPositions()
     self.instance4.listPositions()
     self.instance5.listPositions()
Example No. 29
 def __get_instances_for_sentence(self, start, end, text):
     instances = []
     for tag_type in self.document.annotation_set.types():
         #tag_type1 = tag_type.replace(" ", "_")
         if tag_type.startswith("__"):  # skip internal tag types
             continue
         for extent in self.document.annotation_set.extents_by_type(tag_type):
             if self.__is_overlap((start, end), (extent.start, extent.end)):
                 instance = Instance(extent.start, extent.end, tag_type, text[extent.start:extent.end])
                 instances.append(instance)
     instances.sort()
     return instances
Example No. 30
def generatePseudoPairs(network, numItems):
    mytask = task.Task(inputNodes=settings.inputNodes,
                       hiddenNodes=settings.hiddenNodes,
                       outputNodes=settings.outputNodes,
                       populationSize=numItems,
                       auto=False).task
    pseudoInputs = mytask['inputPatterns']
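    # label each random input with the network's own prediction, forming
    # pseudo-pairs for rehearsal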
    pseudoItems = [Instance(a, network.predict(np.array([a]))[0])
                   for a in pseudoInputs]
    return pseudoItems
Example No. 31
def main():

    filename = 'input/small_0.csv'
    inst = Instance(filename)

    # Visualize zones, passengers, and taxis.
    visualize_zones()
    visualize_paxs(inst)
    visualize_taxis(inst)

    # Show the plot.
    plt.show()
Example No. 32
 def __createInstances(self, dataset):
     """
         Create all the instances of a dataset
         and a list of all the instances
     """
     rows, cols = dataset.shape
     inst = []
     for col in range(cols):
         tmp, mean, std = self.__standardise(dataset[:, col])
         pic = tmp.reshape(self.width, self.height, order='F')
         instance = Instance()
         instance.setValue(pic)
         instance.setMean(mean)
         instance.setStd(std)
         inst.append(instance)
     return np.array(inst)