示例#1
0
def generate_input(filename, num_loc, num_homes, size, seed=None):
    """Generate a random problem instance and write it to *filename*.

    File layout: location count, home count, the location list, the
    home list, the start location, then the weighted adjacency matrix
    with 'x' marking absent edges, padded to fixed-width columns.

    Args:
        filename: output path; any existing content is cleared first.
        num_loc: number of locations (graph nodes) to generate.
        num_homes: number of homes, sampled from the locations.
        size: maximum edge weight; also used to size matrix columns.
        seed: optional RNG seed forwarded to graph_generate.
    """
    print('Creating:', filename)
    g = graph_generate(num_loc, size, size, seed=seed)
    locations = list(g.nodes)
    homes = random.sample(locations, num_homes)
    # random.sample returns a one-element list here, which the writer
    # below serializes the same way as the other rows.
    start = random.sample(locations, 1)
    utils.clear_file(filename)
    utils.write_to_file(filename, str(len(locations)) + '\n', append=True)
    utils.write_to_file(filename, str(len(homes)) + '\n', append=True)
    utils.append_data_next_line(filename, locations, " ", append=True)
    utils.append_data_next_line(filename, homes, " ", append=True)
    utils.append_data_next_line(filename, start, " ", append=True)

    # NOTE(review): nx.to_numpy_matrix is deprecated and removed in
    # networkx 3.0; to_numpy_array is the modern replacement -- confirm
    # the pinned networkx version before changing it.
    matrix = nx.to_numpy_matrix(g, weight='weight')
    # Fixed column width so matrix entries line up in the output file.
    entry_len = len(str(size)) + 6
    for r in matrix:
        for c in np.nditer(r):
            if c == 0:
                # Zero weight means "no edge"; the file format uses 'x'.
                # append=True for consistency with the calls above
                # (previously the string 'a' was passed, which is merely
                # truthy by accident).
                utils.write_to_file(filename,
                                    'x' + (' ' * entry_len),
                                    append=True)
            else:
                utils.write_to_file(filename,
                                    str(c) + (' ' *
                                              (entry_len + 1 - len(str(c)))),
                                    append=True)
        utils.append_next_line(filename)


#generate_input('inputs/50.in', 50, 25, 1000)
# g = graph_generate(3, 3,3)
# for u in g.edges.data('weight'):
#     print(u)
示例#2
0
    def download(self):
        """Download self.download_num pictures from Pixabay.

        Each iteration picks a random keyword, searches Pixabay, and
        spawns a subprocess to fetch one random hit. A successful
        download is recorded via store() and its keyword appended to
        self.tags; a timed-out download is deleted and the slot retried.

        NOTE(review): if searches keep returning zero hits (or keep
        timing out) this loop never terminates -- consider a retry cap.
        """
        logger.info('Downloading pics...')
        pix = Image(config.PIXABAY_API_KEY)
        i = 0

        while i < self.download_num:
            search = utils.generate_keyword()
            img_search = pix.search(q=search, page=1, per_page=30)
            hits = len(img_search['hits'])
            if hits:
                index = random.randint(0, hits - 1)
                url = img_search['hits'][index]['webformatURL']
                pic_path = config.PIC_PATH + str(i) + '.jpg'

                # The downloader runs in a child process so it can be
                # killed on timeout by utils.wait_timeout below.
                args = ','.join("{0}".format(arg) for arg in [url, pic_path])
                cmd = ['runp', 'Downloaders.py', 'downloader:' + args]
                p = subprocess.Popen(cmd)
                pid = utils.wait_timeout(p, config.DOWNLOAD_TIMEOUT)

                if pid is not None:
                    logger.info('Picture downloader ran successfully!')
                    store(self.id, url, 'pic')
                    self.tags.append(search)
                    i += 1
                else:
                    # Remove the partial file so the same slot (index i)
                    # can be reused on the next attempt.
                    utils.clear_file(pic_path)
                    # Fixed log typo: was "timeout out".
                    logger.info('Picture downloader timed out!')
示例#3
0
 def clean_up(self):
     """Purge all generated media and result artifacts, then pause briefly."""
     logger.info('Cleaning up...')
     self.set_status('Preparing to generate more content...')
     # Empty every scratch media folder in one pass.
     for media_dir in (config.VID_PATH, config.GIF_PATH,
                       config.PIC_PATH, config.SFX_PATH):
         utils.clear_folder(media_dir)
     utils.clear_file(self.result_path)
     utils.clear_mp3_files()
     time.sleep(5)
示例#4
0
import glob

from utils import clear_file

# Truncate every text file under ./art and ./errors.
# Loop variable renamed from `file`, which shadowed the Python 2
# builtin (and is a confusing name in Python 3 as well).
for txt_path in glob.glob('./art/*.txt'):
    clear_file(txt_path)
for txt_path in glob.glob('./errors/*.txt'):
    clear_file(txt_path)
示例#5
0
def solve_all(input_directory, output_directory, params=[]):
    """Solve every '.in' file in *input_directory* with the chosen method.

    Args:
        input_directory: directory scanned for input files.
        output_directory: where solve_from_file writes results.
        params: method selector; params[0] names the solver and picks
            which log file to clear. Unknown or empty selectors skip
            the log-clearing step (previously an empty list crashed
            with IndexError on the default argument).

    NOTE(review): params defaults to a mutable list; it is only read
    here, so the shared default is harmless, but the signature is kept
    for caller compatibility.
    """
    # Dispatch table replaces the former if/elif ladder; also fixes the
    # "Clearning logs" typo that two branches carried.
    method_logs = {
        'naive': ("Using naive method", 'logs/naive.log'),
        'greedy': ('Using greedy method', 'logs/greedy.log'),
        'three_opt': ('Using three_opt method', 'logs/three_opt.log'),
        'ant_colony': ("Using ant colony optimization",
                       "logs/ant_colony.log"),
        'greedy_clustering_three_opt': ("Using greedy clustering three opt",
                                        "logs/greedy_clustering_three_opt.log"),
        'mst': ("Using mst method", "logs/mst.log"),
    }
    if params and params[0] in method_logs:
        banner, log_path = method_logs[params[0]]
        print(banner)
        print("Clearing logs")
        utils.clear_file(log_path)
    input_files = utils.get_files_with_extension(input_directory, 'in')

    for input_file in input_files:
        solve_from_file(input_file, output_directory, params=params)
    print()
from utils import write_tagged_sentences, clear_file, num_found_idioms
from nltk.corpus import reuters


# NOTE(review): "retuers" looks like a typo for "reuters", but the path
# is kept as-is because downstream tooling may expect this exact name.
OUTPUT_FILE = "./data/tagged_sentences_retuers.txt"

# Materialize the corpus view once and reuse it below; the original
# called reuters.sents() a second time and left this variable unused.
sents = reuters.sents()

# Clear contents if the file exists
clear_file(OUTPUT_FILE)


## write to the file
success = write_tagged_sentences(sents, OUTPUT_FILE)

if success:
    count = num_found_idioms(OUTPUT_FILE)
    print("number of idioms found (ish): {}".format(count))
示例#7
0
def main():
    """Drive mask generation end to end (Python 2 script).

    Reads CLI options, optionally splits the input file, runs the
    statistical mask algorithm over each split file, then logs
    coverage/rejection statistics and writes the resulting masks.
    """
    start_time = time.time()
    options = get_arguments()
    logging.info("Options")
    logging.info(options)
    # Verbose flag toggles between full debug output and errors only.
    logging_level = logging.DEBUG if options["verbose"] else logging.ERROR
    print options['verbose']
    print logging_level
    logging.getLogger().setLevel(logging_level)

    filepath = options['filepath']
    # NOTE(review): output_path and split_path are not defined in this
    # function -- presumably module-level globals; confirm they are set
    # before main() runs.
    clear_file(output_path)
    print "Start mask generation for file " + filepath
    if not os.path.isfile(filepath):
        print("File path {} does not exist. Exiting...".format(filepath))
        sys.exit()
    # split files if requested, get line counts
    if options['split']:
        total_lines, rejected_lines = split_files(filepath,
                                                  options["max_line_length"])
    else:
        # Reuse an earlier split: count lines in the original file and
        # in the rejected-lines file left by the previous run.
        total_lines = file_len(filepath)
        rejected_lines = file_len(split_path + "/rejected_lines")
    all_masks = []
    cumulated_generated_space = 0
    treated_lines = 0
    #only open split files of correct length
    # Split files appear to be named "file_<length>"; skip the rejects.
    for filename in os.listdir(split_path):
        if filename == "rejected_lines":
            continue

        if int(filename.split("file_")[1]) <= options['max_line_length']:
            with open(os.path.join(split_path, filename), 'r') as fp:
                #  lines_read, generated_space, masks = learning_algorithm(fp)
                lines_read, generated_space, masks = stat_algorithm(
                    fp, options["max_mask_combinations"],
                    options["mask_rejection_ratio"])
                treated_lines += lines_read
                cumulated_generated_space += generated_space
                print_status(lines_read, len(masks), cumulated_generated_space)
                print_masks_to_file(masks, lines_read, generated_space)
                all_masks += masks
                # NOTE(review): fp.close() is redundant inside `with`.
                fp.close()
                logging.info("--- %s seconds ---" % (time.time() - start_time))
    # NOTE(review): this is a for-else. The loop above contains no
    # `break`, so this block ALWAYS executes -- it reads as if a plain
    # unindented suite was intended; confirm the control flow.
    else:
        total_hits = 0
        total_generated_space = 0
        for mask in all_masks:
            total_hits += mask.hitcount
            total_generated_space += mask.generated_space
        # NOTE(review): same for-else pattern; no `break` above, so this
        # summary block runs unconditionally after the summation loop.
        else:
            rejection_ratio = rejected_lines / float(total_lines) * 100
            coverage_ratio = total_hits / float(total_lines) * 100
            logging.info("Total Lines : " + str(total_lines))
            logging.info("Total Rejected Lines : " + str(rejected_lines))
            logging.info("Rejection Ratio : " + str(rejection_ratio))
            logging.info("\n")
            logging.info("Total treated lines : " + str(treated_lines))
            logging.info("Total hits : " + str(total_hits))
            logging.info("Coverage Ratio: {0:.2f}%".format(coverage_ratio))
        logging.info("Generated space " + str(total_generated_space))

        print "Masks Generated : " + str(len(all_masks))
        for mask in all_masks:
            print mask.maskstring
        if total_generated_space > options['max_generated_space']:
            print "Game Over"
        else:
            print "Victory"
        print_masks_to_file(all_masks, total_lines, total_generated_space)
        logging.info("--- %s seconds ---" % (time.time() - start_time))