Example #1
def run():
    config_exo = {
        'access': 'EXO84e091ac6a22bf8fb7b916f2',
        'secret': '-DU8_52ivubZb0-hdrx788GKtbTjw7r32LUOlFKV_VA'
    }

    config_aws = {'access': '', 'secret': ''}

    provider_exo = Provider(name='exoscale', config=config_exo, region='')
    provider_aws = Provider(name='aws', config=config_aws, region='us-east-1')
    semaphore = threading.BoundedSemaphore(2)

    instance_exo = Instance(provider_exo, 'test', 'Medium',
                            'Linux Ubuntu 18.04 LTS 64-bit')
    instance_aws = Instance(provider_aws, 'COCA-BENCH', 't2.micro',
                            'ami-0a313d6098716f372')

    key = RSA.generate(2048)
    keypair = (key.exportKey('PEM'), key.publickey().exportKey('OpenSSH'))

    benchmark_exo = Benchmark(instance_exo, keypair, semaphore)
    # benchmark_aws = Benchmark(instance_aws, keypair, semaphore)

    print "start tests"
    benchmark_exo.start()
    # benchmark_aws.start()
    print "wait threads"
    benchmark_exo.join()
    # benchmark_aws.join()
    print "end"
Example #2
def get_annotations_for_sentence(tokens):
    """
    Return entity extents, in (start, end, tagtype) token positions
    """
    instances = []
    current_tag = None
    extent_start = 0
    ext_char_start = 0
    previous_end = 0
    for i, (token, (start, end), tag) in enumerate(tokens):
        if tag[0] == "B":
            if current_tag:
                # close the previous extent; t[0] is the token's surface string
                instance = Instance(ext_char_start, previous_end, current_tag,
                                    " ".join(t[0] for t in tokens[extent_start:i]))
                instances.append(instance)
            current_tag = tag[2:]
            extent_start = i
            ext_char_start = start
        elif tag[0] == "I":
            pass
        elif tag[0] == "O":
            if current_tag:
                instance = Instance(ext_char_start, previous_end, current_tag,
                                    " ".join(t[0] for t in tokens[extent_start:i]))
                instances.append(instance)
                current_tag = None
        previous_end = end
    if current_tag:
        instance = Instance(ext_char_start, end, current_tag,
                            " ".join(t[0] for t in tokens[extent_start:i + 1]))
        instances.append(instance)
    instances.sort()
    return instances
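For context, the function above consumes BIO-style token triples of the form (token, (char_start, char_end), tag). A minimal sketch of the expected input and output, with Instance stubbed as a namedtuple purely for illustration (the real class comes from the surrounding project):

# hedged, self-contained illustration, not the project's real Instance class
from collections import namedtuple

Instance = namedtuple("Instance", ["start", "end", "tag", "text"])

tokens = [
    ("Barack", (0, 6), "B-PER"),
    ("Obama", (7, 12), "I-PER"),
    ("visited", (13, 20), "O"),
    ("Berlin", (21, 27), "B-LOC"),
]
# get_annotations_for_sentence(tokens) would then yield extents roughly like:
#   Instance(start=0, end=12, tag="PER", text="Barack Obama")
#   Instance(start=21, end=27, tag="LOC", text="Berlin")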
Example #3
def senToInstan(sentence=None, segSentence2=None, segSentenceNumG=None, e1_list=None, e2_list=None, ListInstance=None):
    # sentence = S
    e1List = e1_list
    e2List = e2_list
    for e1 in e1List:
        for e2 in e2List:
            f = 0
            # e1 comes first
            if e1["index1"] <= e2["index2"]:
                for word in segSentenceNumG[e1["index1"] + 1:e2["index2"]]:
                    # check whether the two mentions fall within the same sentence
                    if word in ['.', '。', ';', ';']:
                        f = f + 1
                        if f >= 2:
                            break
                if f == 0 or f == 1:
                    # add an instance
                    instance = Instance(sentence=sentence, segSentence2=segSentence2, segSentenceNumG=segSentenceNumG, e1=e1, e2=e2, flag=0)
                    ListInstance.append(instance)
            # e2 comes first
            if e1["index1"] > e2["index2"]:
                for word in segSentenceNumG[e2["index2"] + 1:e1["index1"]]:
                    if word in ['.', '。', ';', ';']:
                        f = f + 1
                        if f >= 2:
                            break
                if f == 0 or f == 1:
                    instance = Instance(sentence=sentence, segSentence2=segSentence2, segSentenceNumG=segSentenceNumG, e1=e1, e2=e2, flag=1)
                    ListInstance.append(instance)
    return ListInstance
Example #4
    def get_instances(self):
        instancesList = list()

        if len(self.latency) == 1:
            second = self.latency[0]  # unwrap the single value; latency is a sequence
            sliced_data = self.get_time_channel_slice(None, second, second + 1)
            instance = Instance(self.patient_id, second, sliced_data, self.sampling_rate, self.number_of_channels)
            instancesList.append(instance)
        else:
            for second in self.latency[0:-1]:
                sliced_data = self.get_time_channel_slice(None, second, second+1)
                instance = Instance(self.patient_id, second, sliced_data, self.sampling_rate, self.number_of_channels)
                instancesList.append(instance)

        return instancesList
Example #5
def createInstanceObjectList(processed_dataset):
    '''
    Creates a list of Instance objects from the tokenized input + label
    '''
    print('Reading instances...')
    instanceObjects = []

    for item in processed_dataset:
        line = item[1]
        data = line.strip('\n')
        if not data:
            continue  # skip empty lines; 'content' would otherwise be undefined
        all_words = word_tokenize(data)
        content = ' '.join(str(elem) for elem in all_words)
        label = item[2]
        instanceObject = Instance(content, label)
        for i, token in enumerate(content.split()):
            instanceObject.tokenDictionary[i + 1] = Token(token)
        if FeatureSelection.getInstance(featureFile).normalizeInstances:
            instanceObject.tokenDictionary = instanceObject.normalizeTokens()
        instanceObjects.append(instanceObject)
    return instanceObjects
Example #6
    def get_instances(self, folder):

        instances = []
        labels = set()
        for author in os.listdir(folder):
            path = folder + "/" + author + "/agree-sent/"
            path_pos = folder + "/" + author + "/pos/"
            if os.path.exists(path) and os.path.exists(path_pos):
                for af in os.listdir(path):
                    current = os.path.join(path, af)
                    current_pos = os.path.join(
                        path_pos,
                        af.split('.')[0] + '.sent.okpuncs.props.pos')
                    if os.path.isfile(current) and os.path.isfile(current_pos):
                        # read both files together; the original left them unclosed
                        with open(current) as agree_data, open(current_pos) as pos_file:
                            pos_data = pos_file.readlines()
                            for x in agree_data:
                                x = x.strip()
                                sent_id = int(x.split("@")[0])
                                y = pos_data[sent_id].strip()
                                label = int(x.split("@")[1])
                                text = x.split("@", 2)[2]
                                inst = Instance(text, label)
                                for tagtoken in y.split("):("):
                                    tag = tagtoken.split(" ")[0].lstrip("(")
                                    token = tagtoken.split(" ")[1]
                                    token = Token(token, tag)
                                    inst.add_token(token)
                                instances.append(inst)
                                labels.add(label)

        return instances, labels
Example #7
    def _createInstance(self, line):
        """<method internal="yes"/>
        """
        params = {}
        splitted_line = line.split(' -- ')
        zorp_argv = splitted_line[0]

        try:
            arg = self.ZORPCTLCONF['ZORP_APPEND_ARGS']
            if arg:
                zorp_argv += " %s" % arg
        except KeyError:
            pass

        params['name'] = zorp_argv.split()[0]
        params['zorp_argv'] = " ".join(zorp_argv.split()[1:])

        zorpctl_argv = splitted_line[1] if len(splitted_line) > 1 else ""

        try:
            arg = self.ZORPCTLCONF['ZORPCTL_APPEND_ARGS']
            if arg:
                zorpctl_argv += " %s" % arg
        except KeyError:
            pass

        if zorpctl_argv:
            params.update(self._parseZorpctlArgs(zorpctl_argv))

        return Instance(**params)
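For reference, the instance description parsed above is "name + Zorp arguments", optionally followed by zorpctl arguments after a " -- " separator. A hedged illustration (the input line is hypothetical; only the key names come from the code):

# line = "zorp_http --verbose 3 --threads 100 -- --num-of-processes 4"
# _createInstance(line) would then build roughly:
#   params['name']      == 'zorp_http'
#   params['zorp_argv'] == '--verbose 3 --threads 100'
# plus whatever _parseZorpctlArgs() extracts from '--num-of-processes 4',
# with ZORP_APPEND_ARGS / ZORPCTL_APPEND_ARGS appended when configured.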
Example #8
 def get_instances(self, folder):
 # map corpus label codes to emotion names ("hp" covers happiness/joy)
     labels_dict = {
         "hp": "joy",
         "sd": "sadness",
         "ag": "anger",
         "dg": "disgust",
         "sp": "surprise",
         "fr": "fear"
     }
     instances = []
     labels = set()
     tagger = PerceptronTagger()  # load the nltk perceptron just once to speed up tagging
     with open(folder) as f:
         for line in f:
             label, id, text = line.strip().split(" ", 2)  # split on the first two spaces only
             if label == "ne":  # ignore no emotion
                 continue
             inst = Instance(text, labels_dict[label])
             inst_tokenized = word_tokenize(text)
             inst_tagged = tagger.tag(inst_tokenized)
             for tokentag in inst_tagged:
                 token = Token(tokentag[0], tokentag[1])
                 inst.add_token(token)
             instances.append(inst)
             labels.add(labels_dict[label])  # keep the mapped name, matching the Instance label
     return instances, labels
Example #9
    def new(self, params):
        os.makedirs(params.out_dir, exist_ok=True)

        self.params = params
        w.write(params.out_dir + "/params.json",
                w.pretty_json(params.to_json()))

        self.sample = Sample().new(params)
        sample_file = params.out_dir + "/sample.fasta"
        w.write(sample_file, w.fasta(self.sample))

        art_prefix = params.out_dir + "/art"
        art = os.environ['ART_ILLUMINA']
        subprocess.run([
            art, "--in", sample_file, "--out", art_prefix, "--rndSeed",
            str(params.seed)
        ] + params.art_flags,
                       stdout=subprocess.DEVNULL)
        self.art_output = r.read(art_prefix + ".aln", r.aln(params.take_ref))

        self.instance = Instance().new(params, self.art_output)
        w.write(params.out_dir + "/instance.json",
                w.json(self.instance.to_json()))
        w.write(params.out_dir + "/instance.txt",
                w.text(self.instance.to_text()))
        w.write(params.out_dir + "/instance.stats.json",
                w.json(self.instance.stats()))

        return self
Example #10
 def get_instances(self, label_file, xml_file):
     instances = []
     labels_final = set()
     tagger = PerceptronTagger()  # load the nltk perceptron just once to speed up tagging
     labels_dict = {
         0: "anger",
         1: "disgust",
         2: "fear",
         3: "joy",
         4: "sadness",
         5: "surprise"
     }
     tree = ET.parse(xml_file)
     root = tree.getroot()
     with open(label_file) as f:
         for sent, line in zip(root, f):  # izip(root, f) in the original Python 2 code
             id_xml = list(sent.attrib.values())[0]
             id_labels = line.rstrip().split()
             id_file = id_labels[0]
             if id_xml == id_file:
                 for i in sent.itertext():
                     text = i
                 labels = id_labels[1:]
                 # pick the emotion with the highest score
                 label = max(range(len(labels)), key=lambda j: int(labels[j]))
                 inst = Instance(text, labels_dict[label])
                 inst_tokenized = word_tokenize(text)
                 inst_tagged = tagger.tag(inst_tokenized)
                 for tokentag in inst_tagged:
                     token = Token(tokentag[0], tokentag[1])
                     inst.add_token(token)
                 instances.append(inst)
                 labels_final.add(labels_dict[label])  # keep the mapped name, matching the Instance label
         return instances, labels_final
Example #11
    def new(self, root_dir, strains, aligned, sample):
        out_dir = root_dir + "/" + sample[0]

        os.makedirs(out_dir, exist_ok=True)

        # sample_json = out_dir + "/sample.json"
        # w.write(sample_json, w.json(s.to_json(sample)))

        sample_fasta = out_dir + "/sample.fasta"
        w.write(sample_fasta, w.fasta(s.to_fasta(sample)))

        art_prefix = out_dir + "/art"
        art = os.environ['ART_ILLUMINA']
        subprocess.run([
            art, "--in", sample_fasta, "--out", art_prefix, "--seqSys", "HS20",
            "--len", "100", "--fcov", "100"
        ],
                       stdout=subprocess.DEVNULL)
        take_ref = False
        art_output = r.read(art_prefix + ".aln", r.aln(take_ref))

        instance = Instance().new(strains, aligned, art_output)
        # w.write(out_dir + "/instance.json", w.json(instance.to_json()))
        w.write(out_dir + "/instance.txt", w.text(instance.to_text()))
        w.write(out_dir + "/instance.stats.json", w.json(instance.stats()))
Example #12
    def extractFeatures(self):

        print "extracting features..."
        self.buildDicts()
        print "reading Author.csv..."
        with open('/home/bhanu/Downloads/dataRev2/Author.csv',
                  'rb') as csvfile:
            authorReader = csv.reader(csvfile)
            next(authorReader)

            #start reading authors and build their features
            for row in authorReader:
                instance = Instance()
                authorid = row[0]
                instance.id = authorid

                #extract feature words corresponding to author name
                namestr = row[1].split(' ')
                for w in namestr:
                    pw = self.preprocess(w)
                    wid = self.getFeatureId(pw)
                    instance.name.add(wid)

                #extract keywords corresponding to author's affiliation
                for string in row[2:]:
                    nwords = string.split(' ')
                    for w in nwords:
                        pw = self.preprocess(w)
                        wid = self.getFeatureId(pw)
                        instance.affiliations.add(wid)

                #extract features corresponding to co-authors of this author

                paperids = self.author_paperDict.get(authorid)
                if paperids is not None:
                    for pid in paperids:
                        for a in self.paper_authorDict[
                                pid]:  #add all authors to co-authors set
                            aid = self.getFeatureId("author" + a)
                            instance.co_authors.add(aid)
                    instance.co_authors.remove(
                        self.getFeatureId("author" + authorid))
                #what to do when author has no papers
                else:
                    instance.hasNoPapers = True  #data may be missing then
                    self.authors_noPapers += 1

                self.instanceList.append(instance)

        with open('instanceList.obj', 'wb') as dumpfile:  # binary mode for pickle
            cPickle.dump(self.instanceList,
                         dumpfile,
                         protocol=cPickle.HIGHEST_PROTOCOL)
        with open('featureDict.obj', 'wb') as dumpfile:  # binary mode for pickle
            cPickle.dump(self.featDict,
                         dumpfile,
                         protocol=cPickle.HIGHEST_PROTOCOL)

        print "finished extracting features."
Example #13
 def updateHAInstance(self):
     # self.clearlog()
     instance_list = self.getInstanceFromController()
     HAInstance.init()
     for instance in instance_list[:]:
         # [self.id, self.name, self.host, self.status, self.network]
         vm = Instance(ha_instance=instance)
         HAInstance.addInstance(vm)
Example #14
    def add_instance(cluster_id, instance):
        """Wrap the raw instance data and append it to the HA instance list.

        :param cluster_id: ID of the cluster the instance belongs to
        :param instance: raw instance data used to build the wrapper
        """
        print("add vm")
        vm = Instance(cluster_id=cluster_id, ha_instance=instance)
        HAInstance.instance_list.append(vm)
Example #15
def main():
    inst_types = ['small','medium','large','xl']
    n_inst = ['0','1','2','3','4','5','6','7','8','9']

    # Run the solutions directly over the 40 instances.
    results = pd.DataFrame()
    for t in inst_types:
        for n in n_inst:
            inst_file = 'input/' + t + '_' + n + '.csv'
            inst = Instance(inst_file)

            # Greedy solution.
            start_time = time.time()
            f_greedy, x_greedy, ratio_greedy = solve_instance_greedy(inst)
            greedy_time = time.time() - start_time

            # LP solution
            start_time = time.time()
            f_lp, x_lp, ratio_lp = solve_instance_lp(inst)
            lp_time = time.time() - start_time

            # Alternative LP solution
            start_time = time.time()
            f_lp_alt, x_lp_alt, ratio_alt = solve_instance_lp_alternativa(inst)
            lp_alt_time = time.time() - start_time

            # Store each algorithm's solution for this instance
            inst_sol = pd.concat([pd.Series(t), pd.Series(n),
                                  pd.Series(f_greedy), pd.Series(ratio_greedy), pd.Series(greedy_time),
                                  pd.Series(f_lp)    , pd.Series(ratio_lp)    , pd.Series(lp_time),
                                  pd.Series(f_lp_alt), pd.Series(ratio_alt)   , pd.Series(lp_alt_time)], axis=1)

            results = pd.concat([results, inst_sol], axis = 0, ignore_index = True)

    # Consolidate the overall results and compute metrics
    results.columns = ['TipoInstancia', 'NroInstancia',
                       'SolucionGreedy', 'RatioGreedy', 'TiempoGreedy',
                       'SolucionMatching', 'RatioMatching', 'TiempoMatching',
                       'SolucionAlternativa', 'RatioAlternativa', 'TiempoAlternativa']

    results['GAP_dist_lp_gr']   = round((results['SolucionMatching']    / results['SolucionGreedy'] - 1),2)
    results['GAP_ratio_lp_gr']  = round((results['RatioMatching']       / results['RatioGreedy'] - 1), 2)
    results['GAP_time_lp_gr']   = round((results['TiempoMatching']      / results['TiempoGreedy'] - 1), 2)
    results['GAP_dist_alt_gr']  = round((results['SolucionAlternativa'] / results['SolucionGreedy'] - 1),2)
    results['GAP_ratio_alt_gr'] = round((results['RatioAlternativa']    / results['RatioGreedy'] - 1), 2)
    results['GAP_time_alt_gr']  = round((results['TiempoAlternativa']   / results['TiempoGreedy'] - 1), 2)
    results['GAP_dist_alt_lp']  = round((results['SolucionAlternativa'] / results['SolucionMatching'] - 1),2)
    results['GAP_ratio_alt_lp'] = round((results['RatioAlternativa']    / results['RatioMatching'] - 1), 2)
    results['GAP_time_alt_lp']  = round((results['TiempoAlternativa']   / results['TiempoMatching'] - 1), 2)

    # Export the results
    path = 'C:/Repo/Github/combinatorial_optimization/taxi_matching/'
    results.to_csv(path + 'results.csv')
    results.groupby('TipoInstancia').mean().to_csv(path + 'results_grouped.csv')
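A side note on the result collection above: concatenating one-row pd.Series objects works, but accumulating plain dicts and building the DataFrame once is usually clearer and avoids repeated copies. A minimal sketch, with hypothetical values:

import pandas as pd

rows = []
rows.append({'TipoInstancia': 'small', 'NroInstancia': '0',
             'SolucionGreedy': 123.4, 'RatioGreedy': 0.95, 'TiempoGreedy': 0.01})
results = pd.DataFrame(rows)  # column names come straight from the dict keys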
Example #16
 def __init__(self, instanceName, timeLimit=1800, verbose=True):
     self._model = None
     self._modelVars_x = None
     self._modelVars_w = None
     self._modelVars_y = None
     self._modelSolution = None
     self._instance = Instance(instanceName)
     self._timeLimit = timeLimit
     self._verbose = verbose
     self._stats = Statistics(instanceName, "Integer Programming")
Example #17
 def load(self, out_dir):
     self.params = Params().from_json(
         r.read(out_dir + "/params.json", r.json))
     self.sample = Sample().from_fasta(
         r.read(out_dir + "/sample.fasta", r.fasta_list))
     self.art_output = r.read(out_dir + "/art.aln",
                              r.aln(self.params.take_ref))
     self.instance = Instance().from_json(
         r.read(out_dir + "/instance.json", r.json))
     return self
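This load() is the read-side counterpart of the new() method in Example #9: it restores params, sample, ART output and instance from the files new() wrote under out_dir. A hedged usage sketch (the wrapper class name Simulation is hypothetical):

sim = Simulation().new(params)                # writes params.json, sample.fasta, art.aln, instance.json
restored = Simulation().load(params.out_dir)  # reads the same files back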
Example #18
def get_instances(members_names, instances_list):
    instances = []
    for a_name in members_names:
        an_id = get_object_id(instances_list, a_name)
        instance = Instance(a_name, an_id)
        instances.append(instance)

    return instances
Example #19
 def init_task_processor(self, name, nr_cpus, mem, disk_space):
     # Googlefy instance name
     name = self.__format_instance_name(name)
     # Return a processor object with given resource requirements
     instance_config = self.__get_instance_config()
     # Create and return processor
     if self.is_preemptible:
         return PreemptibleInstance(name, nr_cpus, mem, disk_space,
                                    **instance_config)
     else:
         return Instance(name, nr_cpus, mem, disk_space, **instance_config)
Example #20
 def addInstance(self, instance_id):
     if not self.checkInstanceExist(instance_id):
         return Response(code="failed",
                         message="instance %s doesn't exist" % instance_id,
                         data=None)
     elif not self.checkInstanceBootFromVolume(instance_id):
         return Response(code="failed",
                         message="instance %s is not booted from volume" %
                         instance_id,
                         data=None)
     elif not self.checkInstancePowerOn(instance_id):
         return Response(code="failed",
                         message="instance %s is not powered on" %
                         instance_id,
                         data=None)
     else:
         try:
             # Live migration VM to cluster node
             final_host = self.checkInstanceHost(instance_id)
             if final_host is None:
                 final_host = self.liveMigrateInstance(instance_id)
             instance = Instance(
                 id=instance_id,
                 name=self.nova_client.getInstanceName(instance_id),
                 host=final_host,
                 status=self.nova_client.getInstanceState(instance_id),
                 network=self.nova_client.getInstanceNetwork(instance_id))
             self.sendUpdateInstance(final_host)
             self.instance_list.append(instance)
             message = "Cluster--Cluster add instance success ! The instance id is %s." % (
                 instance_id)
             logging.info(message)
             # result = {"code":"0","cluster id":self.id,"node":final_host,"instance id":instance_id,"message":message}
             result = Response(code="succeed",
                               message=message,
                               data={
                                   "cluster id": self.id,
                                   "node": final_host,
                                   "instance id": instance_id
                               })
         except Exception as e:
             print(str(e))
             message = "Cluster--adding instance failed, please check again! The instance id is %s." % (
                 instance_id)
             logging.error(message)
             # result = {"code":"1","cluster id":self.id,"instance id":instance_id,"message":message}
             result = Response(code="failed",
                               message=message,
                               data={
                                   "cluster id": self.id,
                                   "instance id": instance_id
                               })
         finally:
             return result
Example #21
 def __get_instances_for_sentence(self, start, end, text):
     instances = []
     for tag_type in self.document.annotation_set.types():
          if tag_type.startswith("__"):  # skip internal tag types (excluded from output)
             continue
         for extent in self.document.annotation_set.extents_by_type(tag_type):
             if self.__is_overlap((start, end), (extent.start, extent.end)):
                 instance = Instance(extent.start, extent.end, tag_type, text[extent.start:extent.end])
                 instances.append(instance)
     instances.sort()
     return instances
Example #22
def generatePseudoPairs(network, numItems):
    mytask = task.Task(inputNodes=settings.inputNodes,
                       hiddenNodes=settings.hiddenNodes,
                       outputNodes=settings.outputNodes,
                       populationSize=numItems,
                       auto=False).task
    pseudoInputs = mytask['inputPatterns']
    pseudoItems = [Instance(a, network.predict(np.array([a]))[0])
                   for a in pseudoInputs]
    return pseudoItems
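For context, this is the pseudo-rehearsal pattern: random input patterns are pushed through the trained network, and each (input, predicted output) pair becomes a pseudo-item that can later be rehearsed alongside new data. A minimal, self-contained sketch with a stub network standing in for the real one:

import numpy as np

class StubNetwork:
    # hypothetical stand-in; the real network comes from the surrounding project
    def predict(self, x):
        return x * 0.5  # any fixed mapping suffices for illustration

network = StubNetwork()
pseudoInputs = [np.random.rand(4) for _ in range(3)]
pseudoItems = [(a, network.predict(np.array([a]))[0]) for a in pseudoInputs]
# each pseudo-item pairs a random input with the network's current response to it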
Example #23
 def setUp(self):
     self.graph1 = createGraphFromString("#########\n#b.A.@.a#\n#########\n")
     self.graph2 = createGraphFromString("########################\n#f.D.E.e.C.b.A.@.a.B.c.#\n######################.#\n#d.....................#\n########################\n")
     self.graph3 = createGraphFromString("########################\n#...............b.C.D.f#\n#.######################\n#.....@.a.B.c.d.A.e.F.g#\n########################\n")
     self.graph4 = createGraphFromString("#################\n#i.G..c...e..H.p#\n########.########\n#j.A..b...f..D.o#\n########@########\n#k.E..a...g..B.n#\n########.########\n#l.F..d...h..C.m#\n#################\n")
     self.graph5 = createGraphFromString("########################\n#@..............ac.GI.b#\n###d#e#f################\n###A#B#C################\n###g#h#i################\n########################\n")
     self.instance1 = Instance(self.graph1)
     self.instance1.updateTokens()
     self.instance2 = Instance(self.graph2)
     self.instance2.updateTokens()
     self.instance3 = Instance(self.graph3)
     self.instance3.updateTokens()
     self.instance4 = Instance(self.graph4)
     self.instance4.updateTokens()
     self.instance5 = Instance(self.graph5)
     self.instance5.updateTokens()
     self.instance1.listPositions()
     self.instance2.listPositions()
     self.instance3.listPositions()
     self.instance4.listPositions()
     self.instance5.listPositions()
Example #24
def main():

    filename = 'input/small_0.csv'
    inst = Instance(filename)

    # Visualize zones, passengers and taxis.
    visualize_zones()
    visualize_paxs(inst)
    visualize_taxis(inst)

    # Show the plot.
    plt.show()
Example #25
 def update_tags_from_instance(self, instance_id):
     try:
         inst = Instance()
         inst.get_instance_details(instance_id)
         volume_ids = self.list_volumes_from_instances(instance_id)
         boot_vol_ids = self.list_boot_volumes_from_instances(instance_id)
         for id in volume_ids:
             self.update_backup_tags_from_volume(id)
         for id in boot_vol_ids:
             self.update_backup_tags_from_boot_volume(id)
     except Exception:
         pass  # best-effort: skip tag updates on any error
Example #26
 def __init__(self, instanceName, Mu, Lambda, Phi, Omega, verbose=True):
     self._instance = Instance(instanceName)
     self._mu = Mu
     self._lambda = Lambda
     self._phi = Phi
     self._omega = Omega
     self._verbose = verbose
     self._stats = Statistics(instanceName, "Genetic Algorithm")
     self._bestIndividual = None
     self._defaultPermutation = createPermutations(
         [i for i in range(self._instance.m)])
     self._defaultIntervals = createIntervals(
         [i for i in range(self._instance.n)], self._instance.m)
Example #27
def _get_instances_for_sentence(document, start, end, text):
    instances = []
    for tag_type in document.annotation_set.types():
        # skip internal representation
        if tag_type.startswith("__"):
            continue
        for extent in document.annotation_set.extents_by_type(tag_type):
            if _is_overlap((start, end), (extent.start, extent.end)):
                instance = Instance(extent.start, extent.end, tag_type,
                                    text[extent.start:extent.end])
                instances.append(instance)
    instances.sort()
    return instances
Example #28
def run():
    parser = argparse.ArgumentParser(description='Run benchmarks in all instances defined by the config file')
    parser.add_argument("providers", help='Configuration file describing providers')
    parser.add_argument("instances", help='Configuration file describing instances to create and benchmark')
    parser.add_argument("--nThreads", type=int, help='Number of threads to start')
    parser.add_argument("--keyLength", type=int, default=2048, help='Length of the generated SSH key')
    args = parser.parse_args()

    key = RSA.generate(args.keyLength)  # honor the --keyLength option
    keypair = (key.exportKey('PEM'), key.publickey().exportKey('OpenSSH'))

    pkey_name = "coca-bench.pem"

    os.system("chmod 777 " + pkey_name)
    file1 = open(pkey_name, "w")
    file1.write(keypair[0])
    file1.close()
    os.system("chmod 400 " + pkey_name)

    providers_config = ConfigParser.ConfigParser()
    providers_config.read(args.providers)
    instances_config = ConfigParser.ConfigParser()
    instances_config.read(args.instances)

    nthreads = len(instances_config.sections())
    if args.nThreads is not None:
        nthreads = args.nThreads
    semaphore = threading.BoundedSemaphore(nthreads)

    jobs = list()
    for instance_name in instances_config.sections():
        provider_name = instances_config.get(instance_name, "provider")
        flavor = instances_config.get(instance_name, "flavor")
        image = instances_config.get(instance_name, "image")
        provider_data = dict(providers_config.items(provider_name))
        username = None
        if instances_config.has_option(instance_name, "username"):
            username = instances_config.get(instance_name, "username")
        region = instances_config.get(instance_name, "region")
        provider = Provider(provider_name, region, provider_data)
        instance = Instance(provider, instance_name, flavor, image, username)
        bench = Benchmark(instance, keypair, semaphore)
        jobs.append(bench)

    for job in jobs:
        job.start()

    for job in jobs:
        job.join()

    print("all threads are finished")
Example #29
 def addInstance(self, instance_id, send_flag=True):
     # if self.isProtected(instance_id): # check instance is already being protected
     # raise Exception("this instance is already being protected!")
     if not self.checkInstanceExist(instance_id):
         raise Exception("No node has this instance!")
     elif not self.checkInstanceGetVolume(instance_id):
         raise Exception("Instance doesn't have a volume!")
     elif not self.checkInstancePowerOn(instance_id):
         raise Exception("This instance is powered off!")
     else:
         try:
             # Live migration VM to cluster node
             # print "start live migration"
             final_host = self.checkInstanceHost(instance_id)
             if final_host is None:
                 final_host = self.liveMigrateInstance(instance_id)
             instance = Instance(
                 id=instance_id,
                 name=self.nova_client.getInstanceName(instance_id),
                 host=final_host,
                 status=self.nova_client.getInstanceState(instance_id),
                 network=self.nova_client.getInstanceNetwork(instance_id))
             if send_flag:
                 self.sendUpdateInstance(final_host)
             self.instance_list.append(instance)
             message = "Cluster--Cluster add instance success ! The instance id is %s." % (
                 instance_id)
             logging.info(message)
             # result = {"code":"0","cluster id":self.id,"node":final_host,"instance id":instance_id,"message":message}
             result = Response(code="succeed",
                               message=message,
                               data={
                                   "cluster_id": self.id,
                                   "node": final_host,
                                   "instance_id": instance_id
                               })
         except Exception as e:
             print(str(e))
             message = "Cluster--adding instance failed, please check again! The instance id is %s." % (
                 instance_id)
             logging.error(message)
             # result = {"code":"1","cluster id":self.id,"instance id":instance_id,"message":message}
             result = Response(code="failed",
                               message=message,
                               data={
                                   "cluster_id": self.id,
                                   "instance_id": instance_id
                               })
         finally:
             return result
Example #30
def runInstance(job):
    global num_running_jobs

    with mutex:  # guard the shared counter with the module-level lock
        num_running_jobs += 1

    instance = Instance(job)
    instance.run()

    with mutex:
        num_running_jobs -= 1