Example #1
    def combine_landmarks_sort(self, add_landmark_path):

        origin_landmarks = {
            elem.split('_landmarks.txt')[0]: elem
            for elem in os.listdir(self.input_path) if elem.endswith('.txt')
        }
        add_landmarks = {
            elem.split('_noface')[0]: elem
            for elem in os.listdir(add_landmark_path) if elem.endswith('.txt')
        }
        utility.create_dir(self.output_path)

        for key in add_landmarks.keys():
            if key not in origin_landmarks.keys():
                print("{} is not exists in original list".format(key))
                continue
            add_landmark_file_path = "{}/{}".format(add_landmark_path,
                                                    add_landmarks[key])
            origin_landmarks_file_path = "{}/{}".format(
                self.input_path, origin_landmarks[key])
            origin_data = utility.read_file(origin_landmarks_file_path)
            add_data = utility.read_file(add_landmark_file_path)
            origin_data += add_data
            origin_data = sorted(origin_data)

            #print(key, add_landmarks[key],  origin_landmarks[key])

            dst_combine_landmark_path = '{}/{}_add_landmarks.txt'.format(
                self.output_path, key)
            utility.write_into_file(origin_data, dst_combine_landmark_path)
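
The two dict comprehensions above pair files from the two folders by a key derived from each file name. A minimal sketch of that key extraction, using hypothetical file names (the actual naming convention is not shown in the snippet):

# Hypothetical file names, for illustration only; both map to the key 'face001':
'face001_landmarks.txt'.split('_landmarks.txt')[0]
'face001_noface_landmarks.txt'.split('_noface')[0]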
Example #2
    def read(self):
        wb_text = utility.read_file(self.wbFile, self.entryNo)
        info_text = utility.read_file(self.infoFile, self.entryNo)

        if info_text is not None and wb_text is not None:
            return "[%s]" % info_text[0], "%s" % wb_text[0]
        return "No information on this line", None
Example #3
File: wb.py Project: sp9999/Discord
    def wb(self):
        if not self.hasSearch:
            result = utility.read_file(self.filename, self.entryNo)
            if result is not None:
                line = result[0]
                index = result[1]
                count = result[2]
            else:
                return "Not enough stupid things have been said yet.", None
        else:
            result = utility.read_file(self.filename, self.entryNo, self.search)
            if result is not None:
                line = result[0]
                index = result[1]
                count = result[2]
                keyindex = result[3]
                keycount = result[4]
            else:
                return "Not enough stupid things have been said yet.", None

        # No occurrences of the search key could be found
        if index == -1:
            return line, None

        if self.hasSearch:
            return "[%d/%d][%d/%d]" % (index, count, keyindex, keycount), "%s" % line

        return "[%d/%d]" % (index, count), "%s" % line
Example #4
 def parse_dell_asset_multiple(dell_asset_multiple_path):
     # Parse multiple dell asset object in a single path
     da_L = []
     lines = read_file(dell_asset_multiple_path,
                       isYML=False,
                       isURL=False,
                       lines=True)
     if lines is not None:
         i = 0
         while i < len(lines):
             while i < len(lines) and (lines[i] == "" or lines[i].find(
                     DellAsset.header) == 0):
                 i += 1
             if i < len(lines):
                 da = DellAsset(dellasset_str=','.join(lines[i].split(
                     ',')[0:DellAsset.header_num]))
                 warranty_L = []
                 while i < len(lines) and lines[i] != "" and lines[i].find(
                         DellAsset.header) < 0:
                     new_w = Warranty(warranty_str=','.join(lines[i].split(
                         ',')[DellAsset.header_num:]))
                     warranty_L.append(new_w)
                     i += 1
                 da.set_warranty_L(warranty_L)
                 da_L.append(da)
     return da_L
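
The loop above assumes a specific flat-file layout: each asset block is introduced by a line starting with DellAsset.header, and every data row in the block carries the asset columns first (DellAsset.header_num of them) followed by the columns of one warranty. A sketch of that assumed layout (values hypothetical):

# Assumed layout, hypothetical values:
#   <line starting with DellAsset.header>                    # begins an asset block
#   <asset col 1>,...,<asset col N>,<warranty cols for w1>   # N == DellAsset.header_num
#   <asset col 1>,...,<asset col N>,<warranty cols for w2>   # one row per warranty
#   <blank line or next header line>                         # ends the block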
Example #5
def search_existing_dell_asset(svctags, update_translation=True):
    result = []
    possible_file_count = 0
    for s in svctags:
        if s == svc_placeholder:
            possible_file_count += 1
    possible_file_count = 36**possible_file_count
    pattern = compile_svc_pattern(svctags)
    if pattern is not None:
        existing_dell_asset_svctags = list_file_name_in_dir(
            existing_dell_asset_dir)
        target_dell_asset_S = set([])
        target_count = 0
        for svc in existing_dell_asset_svctags:
            if pattern.match(svc):
                target_dell_asset_S.add(svc)
                target_count += 1
            if target_count == possible_file_count:
                break
        result = DellAsset.parse_dell_asset_file_batch(existing_dell_asset_dir,
                                                       target_dell_asset_S,
                                                       logger=None)
    if update_translation:
        config = read_file(parent_path + file_config_name,
                           isYML=True,
                           isURL=False)
        result = update_dell_warranty_translation(
            config[config_translation_url], result, existing_dell_asset_dir)[0]
    return result
Example #6
    def clean_list_certain_class_number_label(self, select_num):

        dst_path = self.input_folder_path.replace(
            '.txt', '_GE_{}_label.txt'.format(select_num))
        f = open(dst_path, 'w')

        data = utility.read_file(self.input_folder_path)
        list_container = []
        class_num = 0
        for index, elem in enumerate(data):
            if len(list_container) == 0:
                list_container.append(elem.split(' ')[0])
            else:
                current_prefix, current_name = utility.get_basename_parent_path(
                    elem.split(' ')[0])
                last_prefix, last_name = utility.get_basename_parent_path(
                    list_container[-1])
                #print(current_prefix, last_prefix)

                if current_prefix == last_prefix:
                    list_container.append(elem.split(' ')[0])
                else:
                    if len(list_container) >= select_num:
                        for line in list_container:
                            f.write('{} {}\n'.format(line, class_num))
                        class_num += 1
                    list_container = []
                    list_container.append(elem.split(' ')[0])

            utility.show_process_percentage(index, len(data) - 1, 1000)

        # Write out the final accumulated group and close the output file
        if len(list_container) >= select_num:
            for line in list_container:
                f.write('{} {}\n'.format(line, class_num))
        f.close()
Example #7
    def create_pair_list(self,
                         image_list_path,
                         imageid_prefix='id',
                         imagelife_prefix='life'):

        data = utility.read_file(image_list_path)
        list_id = []
        list_life = []
        for line in data:
            if line.find(imageid_prefix) >= 0:
                list_id.append(line)
            elif line.find(imagelife_prefix) >= 0:
                list_life.append(line)
            else:
                print('neither {} nor {} found in {}'.format(
                    imageid_prefix, imagelife_prefix, line))

        dst_image_pair_list = image_list_path.replace('.txt', 'pair.txt')
        f = open(dst_image_pair_list, 'w')
        for index, life_elem in enumerate(list_life):
            for id_elem in list_id:
                id_name = id_elem.split('/')[-1].split('_')[0]
                life_name = life_elem.split('/')[-1].split('_')[0]
                if id_name == life_name:
                    label = 1
                else:
                    label = 0

                f.write('{} {} {}\n'.format(life_elem, id_elem, label))
            utility.show_process_percentage(index, len(list_life) - 1, 500)
        f.close()
Example #8
def collect_all_points(filename, decisions, objectives):
    indeps = []
    deps = []
    # print repeat
    t_indep, t_dep = read_file(filename, decisions, objectives)
    indeps += t_indep
    deps += t_dep
    return indeps, deps
Example #9
File: wb.py Project: sp9999/Discord
def wb_global(server_filename):
    result = utility.read_file(server_filename)
    if result is not None:
        line = result[0]
        index = result[1]
        count = result[2]
        return "[%d/%d]" % (index, count), "%s" % line
    return "Not enough stupid things have been said yet.", None
Example #10
def update_search_history(search_history_path, new_search_svctags_S):
    existing_search_dict = read_file(search_history_path, isYML=True, isURL=False)
    new_search_dict = {}
    current_date = get_current_datetime(is_format=False, is_date=True)
    for svc in new_search_svctags_S:
        if svc is not None and svc != "":
            new_search_dict[svc] = current_date
    existing_search_dict.update(new_search_dict)
    return save_object_to_path(existing_search_dict, search_history_path, isYML=True)
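
The history file read and written above appears to be a flat YAML mapping from service tag to the date it was last searched. A sketch of its assumed contents, with hypothetical tags and dates:

# Hypothetical contents of search_history_path (YAML: service tag -> last search date):
#   ABC1234: 2016-05-01
#   XYZ9876: 2016-05-03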
Example #11
def main(argv):
    '''
    Process command-line arguments and search for items to buy with the gift card.
    '''
    parser = argparse.ArgumentParser(description="Find items to buy with a gift card")
    parser.add_argument('filename', type=str, help='name of file containing items and prices')
    parser.add_argument('target', type=int, help='amount available in gift card')
    args = parser.parse_args()
    bonus(utility.read_file(args.filename), args.target)
Example #12
    def __init__(self, data_folder = ""):
        if data_folder == "":
            data_folder = self.DATA_FOLDER

        for key in self.CATEGORY:
            file_path = os.path.join(data_folder, "%s.txt" % key)

            self.classifier[key] = set()
            if os.path.isfile(file_path):
                self.classifier[key] = utility.read_file(file_path)
Example #13
def collect_all_points(subdir, decisions, objectives):
    indeps = []
    deps = []
    repeats = [subdir + f for f in os.listdir(subdir) if ".DS_Store" not in f]
    for repeat in repeats:
        # print repeat
        t_indep, t_dep = read_file(repeat, decisions, objectives)
        indeps += t_indep
        deps += t_dep
    return indeps, deps
Example #14
    def create_fakeface_label_from_landmark(self, landmark_path):
        label_path = landmark_path.replace('.txt', '_label.txt')
        f = open(label_path, 'w')
        data = utility.read_file(landmark_path)
        for index, elem in enumerate(data):
            image_path = elem.split(' ')[0]
            label = image_path.split('/')[0].split('_')[0]
            f.write('{} {}\n'.format(image_path, label))
            utility.show_process_percentage(index, len(data) - 1, 500)

        f.close()
Example #15
	def parse_dell_asset_file(dell_asset_path):
		lines = read_file(dell_asset_path, isYML=False, isURL=False, lines=True)
		if lines is not None and len(lines) > 1:
			da = DellAsset(dellasset_str=','.join(lines[1].split(',')[0:DellAsset.header_num]))
			warranty_L = []
			for i in xrange(1, len(lines)):
				if lines[i] != "":
					warranty_L.append(Warranty(warranty_str=','.join(lines[i].split(',')[DellAsset.header_num:])))
			da.set_warranty_L(warranty_L)
			return da
		else:	
			return None
Example #16
def update_search_history(search_history_path, new_search_svctags_S):
    existing_search_dict = read_file(search_history_path,
                                     isYML=True,
                                     isURL=False)
    new_search_dict = {}
    current_date = get_current_datetime(is_format=False, is_date=True)
    for svc in new_search_svctags_S:
        if svc is not None and svc != "":
            new_search_dict[svc] = current_date
    existing_search_dict.update(new_search_dict)
    return save_object_to_path(existing_search_dict,
                               search_history_path,
                               isYML=True)
Example #17
def read_program(status_path, traces_path, instrument_path):
    statuses = read_file(status_path).split('\n')
    traces = read_file(traces_path).split('\n')
    if len(statuses) == 0:
        print 'status length 0'
    if len(traces) == 0:
        print 'traces length 0'
    if len(statuses) != len(traces):
        print 'length of traces and status are different'
    testcases = list([])
    for i in xrange(0, len(statuses)):
        # statuses[i] looks like "label:value"; compare the status field as a string
        status = statuses[i].split(':')[1] == '0'
        trace = traces[i].split(':')[1]
        testcase = TestCase(trace, status)
        testcases.append(testcase)
    instrument = read_file(instrument_path).split('\n')
    if len(instrument) < 2:
        print 'can not read instrument info'
    fault_line = instrument[0].split(':')[1].split(',')
    fault = [int(e) for e in fault_line]
    total = int(instrument[1].split(':')[1])
    return Program(testcases, fault, total)
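
read_program assumes one entry per line in each of the three files, with a label before a colon and the payload after it. A sketch of the assumed formats (file names, labels, and values are hypothetical):

# status.txt      ->  "case1:0"         # status field; '0' yields status True above
# traces.txt      ->  "case1:3,7,12"    # comma-separated integers (covered lines)
# instrument.txt  ->  "fault:7,12"      # faulty line numbers
#                     "total:40"        # total instrumented-line count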
Example #18
def excuse_cmd(server, startIndex = None, subjectIndex = None, problemIndex = None):
    fileStart = utility.getFile(server, config.ExcuseFiles[0])
    fileSubject = utility.getFile(server, config.ExcuseFiles[1])
    fileProblem = utility.getFile(server, config.ExcuseFiles[2])

    if startIndex == "0":
        startIndex = None
    if subjectIndex == "0":
        subjectIndex = None
    if problemIndex == "0":
        problemIndex = None

    try:
        start, startIndex, startTotal = utility.read_file(fileStart, startIndex)
        subject, subjectIndex, subjectTotal = utility.read_file(fileSubject, subjectIndex)
        problem, problemIndex, problemTotal = utility.read_file(fileProblem, problemIndex)
    except ValueError:
        return "Not all files exist"
    except TypeError:
        return "Not all files exist"

    return "[%d/%d/%d] %s %s %s." %(startIndex, subjectIndex, problemIndex, start, subject, problem)
Example #19
    def selected_certain_part_from_landmark_file(self, select_item, find=True):

        f = open(self.output_path, 'w')
        data = utility.read_file(self.input_path)

        for index, line in enumerate(data):

            if find:
                if line.find(select_item) >= 0:
                    f.write('{}\n'.format(line))
            else:
                if line.find(select_item) < 0:
                    f.write('{}\n'.format(line))
            utility.show_process_percentage(index, len(data) - 1, 500)
        f.close()
Example #20
def filter_search_history_expired(svc_S, search_history_path, day_offset):
    expired_S = set([])
    existing_search_dict = read_file(search_history_path,
                                     isYML=True,
                                     isURL=False)
    current_date = get_current_datetime()
    for svctag, search_date in existing_search_dict.items():
        pattern = compile_svc_pattern(svctag)
        # Iterate over a snapshot because svc_S is modified inside the loop
        for svc in list(svc_S):
            if pattern.match(svc) and diff_two_datetime(
                    current_date, search_date, days=True).days < day_offset:
                svc_S.remove(svc)
            else:
                expired_S.add(svc)
    return expired_S
Example #21
    def select_face_recognition_result(self, possibility):

        dst_result_path = self.input_path.replace(
            '.txt', '_correct_possi{}_select.txt'.format(possibility))
        f = open(dst_result_path, 'w')

        data = utility.read_file(self.input_path)
        for index, elem in enumerate(data):
            true_result = float(elem.split(' ')[2])
            true_label = elem.split(' ')[1]
            predict_label = elem.split(' ')[3]
            if true_label == predict_label and true_result >= possibility:
                if not elem.endswith('\n'):
                    elem += '\n'
                f.write(elem)
            utility.show_process_percentage(index, len(data) - 1, 1000)
        f.close()
Example #22
def extract_dict(traces_path):
    traces = read_file(traces_path).split('\n')
    comb1 = set()
    comb2 = set()
    comb3 = set()
    for line in traces:
        if line != '':
            all = line.split(":")[1].split(",")
            all = [int(e) for e in all if cast_to_int(e)]
            comb1 |= get_comb1(all)
            comb2 |= get_comb2(all)
            comb3 |= get_comb3(all)
    print "1", comb1
    print "2", comb2
    print "3", comb3
    return comb1, comb2, comb3
Example #23
def prepare_data():
    corpus_languages = ['english', 'spanish', 'french', 'portuguese']

    language = []
    texts = []

    for corpus_language in corpus_languages:
        text = read_file(
            'identification_langue/corpus_entrainement/{}-training.txt'.format(
                corpus_language))
        tokenizer = nltk.data.load(
            'tokenizers/punkt/{}.pickle'.format(corpus_language))

        sentences = tokenizer.tokenize(text.strip())
        texts.extend(sentences)
        language.extend([corpus_language] * len(sentences))

    return texts, language
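
nltk.data.load above expects the punkt tokenizer models to be installed locally; a minimal sketch of fetching them once, assuming a standard NLTK setup:

import nltk

# Download the punkt sentence tokenizer models used by nltk.data.load above.
nltk.download('punkt')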
Example #24
 def parse_dell_asset_file(dell_asset_path):
     lines = read_file(dell_asset_path,
                       isYML=False,
                       isURL=False,
                       lines=True)
     if lines is not None and len(lines) > 1:
         da = DellAsset(dellasset_str=','.join(lines[1].split(',')
                                               [0:DellAsset.header_num]))
         warranty_L = []
         for i in xrange(1, len(lines)):
             if lines[i] != "":
                 warranty_L.append(
                     Warranty(warranty_str=','.join(lines[i].split(
                         ',')[DellAsset.header_num:])))
         da.set_warranty_L(warranty_L)
         return da
     else:
         return None
Example #25
def translate_dell_warranty(yml_url_path, dell_asset_L, logger=None):
	tran_dict = read_file(yml_url_path, isYML=True, isURL=True)
	tran_dict = filter_NA_translation(tran_dict)
	if logger is not None:
		logger.info("Read translation from " + yml_url_path)
	NA_dict = {}
	for dell_asset in dell_asset_L:
		for w in dell_asset.get_warranty():
			if w is not None and w.service_en is not None:
				if w.service_ch == service_ch_placeholder:
					if w.service_en in tran_dict:
						w.set_service_ch(tran_dict[w.service_en])
						dell_asset.is_translation_updated = True
					else:
						NA_dict[w.service_en] = dell_asset.svctag
			else:
				if logger is not None:
					logger.warn("Warranty service name not valid for %s and \n%s" % (dell_asset.svctag, w))
	return dell_asset_L, NA_dict
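
The translation dictionary fetched above is used as a lookup from the English warranty service name to its Chinese counterpart. A sketch of the assumed shape (keys and values hypothetical):

# Assumed shape of tran_dict:
#   { "<English service name>": "<Chinese translation>" }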
Example #26
	def parse_dell_asset_multiple(dell_asset_multiple_path):
		# Parse multiple dell asset object in a single path
		da_L = []
		lines = read_file(dell_asset_multiple_path, isYML=False, isURL=False, lines=True)
		if lines is not None:
			i = 0
			while i < len(lines):
				while  i < len(lines) and (lines[i] == "" or lines[i].find(DellAsset.header) == 0):
					i += 1
				if  i < len(lines):
					da = DellAsset(dellasset_str=','.join(lines[i].split(',')[0:DellAsset.header_num]))
					warranty_L = []
					while i < len(lines) and lines[i] != "" and lines[i].find(DellAsset.header) < 0:
						new_w = Warranty(warranty_str=','.join(lines[i].split(',')[DellAsset.header_num:]))
						warranty_L.append(new_w)
						i += 1
					da.set_warranty_L(warranty_L)
					da_L.append(da)
		return da_L
Example #27
    def create_patch_label_from_list(self, crop_list):

        data = utility.read_file(crop_list)

        dst_path = crop_list.replace('.txt', '_label.txt')
        f = open(dst_path, 'w')

        common_prefix = os.path.abspath(os.path.join(data[0], ".."))
        counter = 0
        for index, elem in enumerate(data):
            elem_prefix = os.path.abspath(os.path.join(elem, ".."))
            if elem_prefix != common_prefix:
                common_prefix = elem_prefix
                counter += 1

            f.write('{} {}\n'.format(elem, counter))
            sys.stdout.write('{}/{}\r'.format(index, len(data) - 1))
            sys.stdout.flush()

        f.close()
Example #28
def word2vec(filepath, query):

    global model
    modelfile = filepath + '.model'

    if not os.path.exists(modelfile):

        sentences = []
        for transactions in utility.read_file(filepath, '\t'):
            sentences.append(transactions)
    
        model = gensim.models.Word2Vec(sentences, min_count = 5)
        model.save(modelfile)
    else:
        if model is None:
            model = gensim.models.Word2Vec.load(modelfile)

    print '-------------------------'
    print 'raw word: ' + query
    for word, sim in model.most_similar(positive=[query]):
        print word, sim
Example #29
    def readData(self):
        # File paths
        # The names follow a standard convention; only the folder is adapted
        self.adj_name = Generateur.dossier + "/adjectif.txt"
        self.article_name = Generateur.dossier + "/article.txt"
        self.verbe_name = Generateur.dossier + "/verbe.txt"
        self.nom_name = Generateur.dossier + "/nom.txt"
        self.adv_name = Generateur.dossier + "/adverbe.txt"
        self.prep_name = Generateur.dossier + "/preposition.txt"

        # Load the word lists from the files
        self.preps = utility.read_file(self.prep_name)
        self.advs = utility.read_file(self.adv_name)
        self.adjs = utility.read_file(self.adj_name)
        self.articles = utility.read_file(self.article_name)
        self.noms = utility.read_file(self.nom_name)
        self.verbes = utility.read_file(self.verbe_name)
Example #30
    def clean_unregular_landmark_items(self, landmark_path, rest=False):
        if not rest:
            clean_path = landmark_path.replace('.txt', '_clean.txt')
        else:
            clean_path = landmark_path.replace('.txt', '_unregular.txt')

        f = open(clean_path, 'w')
        data = utility.read_file(landmark_path)
        for index, elem in enumerate(data):
            elem_num = len(elem.split(' '))

            if elem.find('\n') < 0:
                elem += '\n'

            if not rest:
                # a regular line has 12 space-separated fields:
                # the image path plus 11 landmark values
                if elem_num == 12:
                    f.write(elem)
            else:
                if elem_num != 12:
                    f.write(elem)
            utility.show_process_percentage(index, len(data) - 1, 500)
        f.close()
Example #31
def search_existing_dell_asset(svctags, update_translation=True):
    result = []
    possible_file_count = 0
    for s in svctags:
        if s == svc_placeholder:
            possible_file_count += 1
    possible_file_count = 36 ** possible_file_count
    pattern = compile_svc_pattern(svctags)
    if pattern is not None:
        existing_dell_asset_svctags = list_file_name_in_dir(existing_dell_asset_dir)
        target_dell_asset_S = set([])
        target_count = 0
        for svc in existing_dell_asset_svctags:
            if pattern.match(svc):
                target_dell_asset_S.add(svc)
                target_count += 1
            if target_count == possible_file_count:
                break
        result = DellAsset.parse_dell_asset_file_batch(existing_dell_asset_dir, target_dell_asset_S, logger=None)
    if update_translation:
        config = read_file(parent_path + file_config_name, isYML=True, isURL=False)
        result = update_dell_warranty_translation(config[config_translation_url], result, existing_dell_asset_dir)[0]
    return result
Example #32
def generate(traces_path, comb1_path, comb2_path, comb3_path):
    comb1, comb2, comb3 = extract_dict(traces_path)
    traces = read_file(traces_path).split('\n')
    ret1 = []
    ret2 = []
    ret3 = []
    ret1.append("combination " + " ".join(comb1))
    ret2.append("combination " + " ".join(comb2))
    ret3.append("combination " + " ".join(comb3))
    for line in traces:
        if line != '':
            dict1, dict2, dict3 = extract_feature(comb1, comb2, comb3, line)
            line1 = line.split(":")[0] + " " + convert_to_line(comb1, dict1)
            line2 = line.split(":")[0] + " " + convert_to_line(comb2, dict2)
            line3 = line.split(":")[0] + " " + convert_to_line(comb3, dict3)
            ret1.append(line1)
            ret2.append(line2)
            ret3.append(line3)
    text1 = "\n".join(ret1)
    text2 = "\n".join(ret2)
    text3 = "\n".join(ret3)
    write_file(text1, comb1_path)
    write_file(text2, comb2_path)
    write_file(text3, comb3_path)
Example #33
    aggregated_indexes = []
    for i, chunk in enumerate(chunks):
        ret_indexes = non_dominated_sort(chunk)
        for ret_index in ret_indexes:
            aggregated_indexes.append(
                int(len(dependents) / size) * i + ret_index)

    aggregated_pfs = [dependents[i] for i in aggregated_indexes]
    aggregated_indexes_2 = non_dominated_sort(aggregated_pfs)

    ret_indexes = [aggregated_indexes[i] for i in aggregated_indexes_2]
    return ret_indexes


if __name__ == "__main__":
    data = read_file("./Data/Sac1_2.csv")
    dependents = [d.objectives for d in data]
    import time
    for size in xrange(1, 20):
        time_holder = []
        for _ in xrange(10):
            print ". ",
            sys.stdout.flush()
            star = time.time()
            pf_indexes = non_dominated_sort_fast(dependents, [False, True],
                                                 size)
            pf = [dependents[i] for i in pf_indexes]
            pf = sorted(pf, key=lambda x: x[0])
            time_holder.append(time.time() - star)
        print size, sum(time_holder) / len(time_holder)
Example #34
def run_main(files, repeat_no, stop, start_size):
    initial_time = time.time()
    all_data = {}
    initial_sample_size = start_size
    for file in files:
        all_data[file] = {}
        all_data[file]['evals'] = []
        all_data[file]['gen_dist'] = []
        all_data[file]['igd'] = []

        print file
        data = read_file('../Data/' + file + '.csv')

        # Creating Objective Dict
        objectives_dict = {}
        for d in data:
            key = ",".join(map(str, d.decisions))
            objectives_dict[key] = d.objectives

        number_of_objectives = len(data[0].objectives)
        number_of_directions = 10

        directions = [get_random_numbers(number_of_objectives) for _ in xrange(number_of_directions)]
        shuffle(data)

        training_indep = [d.decisions for d in data[:initial_sample_size]]
        testing_indep = [d.decisions for d in data[initial_sample_size:]]

        while True:
            print ". ",
            sys.stdout.flush()

            def get_objective_score(independent):
                key = ",".join(map(str, independent))
                return objectives_dict[key]

            training_dep = [get_objective_score(r) for r in training_indep]

            next_point_indexes = get_next_points(file, training_indep, training_dep, testing_indep, directions)
            # print "Points Sampled: ", next_point_indexes
            next_point_indexes = sorted(next_point_indexes, reverse=True)
            for next_point_index in next_point_indexes:
                temp = testing_indep[next_point_index]
                del testing_indep[next_point_index]
                training_indep.append(temp)
            # print len(training_indep), len(testing_indep), len(data)
            assert(len(training_indep) + len(testing_indep) == len(data)), "Something is wrong"
            if len(training_indep) > stop: break


        print
        print "Size of the frontier = ", len(training_indep), " Evals: ", len(training_indep),
        # Calculate the True ND
        training_dependent = [get_objective_score(r) for r in training_indep]
        approx_dependent_index = non_dominated_sort(training_dependent, lessismore[file], [r[0] for r in ranges[file]],
                                             [r[1] for r in ranges[file]])
        approx_dependent = sorted([training_dependent[i] for i in approx_dependent_index], key=lambda x: x[0])
        all_data[file]['evals'].append(len(training_indep))

        actual_dependent = [d.objectives for d in data]
        true_pf_indexes = non_dominated_sort(actual_dependent, lessismore[file], [r[0] for r in ranges[file]],
                                             [r[1] for r in ranges[file]])
        true_pf = sorted([actual_dependent[i] for i in true_pf_indexes], key=lambda x: x[0])
        print "Length of True PF: " , len(true_pf),
        print "Length of the Actual PF: ", len(training_dependent),
        all_data[file]['gen_dist'].append(generational_distance(true_pf, approx_dependent, ranges[file]))
        all_data[file]['igd'].append(inverted_generational_distance(true_pf, approx_dependent, ranges[file]))

        print " GD: ", all_data[file]['gen_dist'][-1],
        print " IGD: ", all_data[file]['igd'][-1]
        all_data[file]['time'] = time.time() - initial_time

        import pickle
        pickle.dump(all_data, open('PickleLocker_FlashB_'+str(start_size)+'_'+str(stop)+'/' + file + '_' + str(repeat_no) + '.p', 'w'))
Example #35
import sys, traceback, socket


required_arg_list = ['--parent_path=', '--svctag=', '--v=']
subject_temp = "%s 时间%s 标签%s"

if __name__ == "__main__":
	arguments = parse_cmd_args(sys.argv, required_arg_list)
	parent_path = arguments['parent_path']
	svctag = arguments['svctag']
	verbose = True if 'v' in arguments else False
	logger = Logger(verbose)
	start_time = get_current_datetime()
	logger.info("Prepare arguments for a job on %s in machine: %s " % (start_time, socket.gethostname()))
	log_output_path = "%slog/dellasset_%s.txt" % (parent_path, svctag)
	config = read_file(parent_path + file_config_name, isYML=True, isURL=False)
	if not config:
		logger.error("Config %s parsed as None; job quits" % (parent_path + file_config_name))
		sys.exit(-1)
	svc_L = svctag.split(svc_delimitor)
	history_valid_svctag_path = parent_path + "valid_svctags.txt"
	dell_asset_path = existing_dell_asset_dir
	search_history_path = parent_path + "search_history.yml"
	dell_asset_output_path = parent_path + "output_%s.xlsx" % svctag
	api_url = config['dell_api_url']
	api_key_L = config["dell_api_key"].values()
	transl_url = config[config_translation_url]
	dell_support_url = config['dell_support_url']
	output_dell_asset_L = []
	api_dell_asset_L = []
	NA_dict = {}
Example #36
    lessismore['./Data/wc-6d-c1.csv'] = [False, True]
    lessismore['./Data/wc-c1-3d-c1.csv'] = [False, True]
    lessismore['./Data/wc-c3-3d-c1.csv'] = [False, True]

    all_data = {}
    for file in files:
        all_data[file] = {}
        all_data[file]['evals'] = []
        all_data[file]['gen_dist'] = []
        all_data[file]['igd'] = []

        print file
        for _ in xrange(40):
            print ". ",
            sys.stdout.flush()
            data = read_file(file)
            splits = split_data(data, 40, 2, 58)
            temp = mmre_based_approach(splits[0], splits[1])

            predicted_dependent = build_model(temp, splits[2])

            actual_dependent = [d.objectives for d in splits[2]]

            true_pf_indexes = non_dominated_sort(actual_dependent,
                                                 lessismore[file])
            predicted_pf_indexes = non_dominated_sort(predicted_dependent,
                                                      lessismore[file])
            # Actual Dependent values of the predicted_pf
            predicted_actual = [
                actual_dependent[i] for i in predicted_pf_indexes
            ]
Example #37
 def __init__(self, path, outputPath):
     self.outputPath = outputPath
     self.texte = utility.read_file(path)
     self.constructionDic()