def parse_args_and_setup():
    # parse and print arguments
    utils.parse_arguments()
    if utils.global_args.mode == 'stats':
        utils.quit('stats mode not supported in data_analytics program; only output mode supported')
    if utils.global_args.mode == 'input':
        utils.quit('input mode not supported in data_analytics program; only output mode supported')

    return
def data_input_parse_args():

    # parse and print arguments
    utils.parse_arguments()

    if utils.global_args.mode == 'stats':
        utils.quit('stats mode not supported in data_input program; only input mode supported')
    if utils.global_args.mode == 'output':
        utils.quit('output mode not supported in data_input program; only input mode supported')

    if utils.global_args.inputxls == "" and utils.global_args.inputxml == "":
        utils.quit('both input excel file and xml file cannot be empty')
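Note: the two wrappers above assume a shared utils module that parses the command line into a module-level global_args and exposes quit(). A minimal sketch of such helpers, with flag names inferred from the calls above (hypothetical, not the project's actual module):

# Hypothetical sketch of the shared utils helpers assumed above.
import argparse
import sys

global_args = None

def parse_arguments():
    """Parse common flags into the module-level global_args and echo them."""
    global global_args
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', choices=['input', 'output', 'stats'], default='output')
    parser.add_argument('--inputxls', default='')
    parser.add_argument('--inputxml', default='')
    global_args = parser.parse_args()
    print(global_args)
    return global_args

def quit(message):
    """Print an error message and exit with a non-zero status."""
    print(message, file=sys.stderr)
    sys.exit(1)
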
Example #3
def create_cpp_ros_package():

    (package, depends) = utils.parse_arguments(['roscpp','std_msgs'])
    # Make directories
    p = os.path.abspath(package)
    os.makedirs(os.path.join(p,"src"))
    print "Created package directory 'src'"
    os.makedirs(os.path.join(p,"src","lib"))
    print "Created package directory 'src/lib'"
    os.makedirs(os.path.join(p,"include"))
    print "Created package directory 'include'"
    os.makedirs(os.path.join(p,"include",package))
    print "Created package directory 'include/%s'"%package
    os.makedirs(os.path.join(p,"launch"))
    print "Created package directory 'launch'"

    # Text files
    manifest_depends = ''.join(['  <depend package="%s"/>\n'%d for d in depends])
    cmake_depends = ''.join(['%s '%d for d in depends])
    templates = get_templates(package)
    for filename, template in templates.iteritems():
        contents = utils.instantiate_template(template, package, package, package, utils.author_name(), manifest_depends, cmake_depends)
        p = os.path.abspath(os.path.join(package, filename))
        # A context manager closes the file even if the write fails; the old
        # try/finally raised NameError whenever open() itself failed.
        with open(p, 'w') as f:
            f.write(contents.encode('utf-8'))
        print "Created package file", p
    utils.print_concluding_catkin_message(package)
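Note: the ROS package creators in this listing repeatedly call utils.instantiate_template with the package name, author and dependency strings. A hedged sketch of what such a helper might do, assuming %-style placeholders in each text template (the real utils API may differ):

# Assumed behaviour only: fill a %-style text template with the given fields, in order.
def instantiate_template(template, *fields):
    return template % tuple(fields)

# Usage with a toy manifest.xml template and a dependency block like manifest_depends above.
manifest_template = ('<package>\n'
                     '  <name>%s</name>\n'
                     '%s'
                     '</package>\n')
print(instantiate_template(manifest_template, 'my_pkg',
                           '  <depend package="roscpp"/>\n'))
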
def main():
    global output, events, file_name

    args = parse_arguments()
    output = os.environ['OUTPUT'] if 'OUTPUT' in os.environ else args.output

    start = os.environ['START'] if 'START' in os.environ else args.start
    start_date = datetime.strptime(start, '%Y-%m-%d')
    end = os.environ['END'] if 'END' in os.environ else args.end
    final_end_date = end_date = datetime.strptime(end, '%Y-%m-%d')
    days = (end_date - start_date).days
    file_name = 'osu-events-from-{}-to-{}'.format(start, end)

    events = []
    while days > 365:
        end_date = start_date + timedelta(days=365)
        end = end_date.strftime('%Y-%m-%d')

        events += get_events(start, end)

        start_date = datetime.strptime(end, '%Y-%m-%d') + timedelta(days=1)
        start = start_date.strftime('%Y-%m-%d')
        end_date = final_end_date
        end = end_date.strftime('%Y-%m-%d')

        days = (end_date - start_date).days

    events += get_events(start, end)

    fields = [event_filter for event_filter in send_request(EVENT_FILTERS_URL).keys()]
    fields.append('departments')
    create_report_by(fields)
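Note: the while loop above splits a long date range into windows of at most one year before calling get_events. The same chunking logic, pulled out as a self-contained sketch (get_events stubbed out), makes the window boundaries easier to see:

from datetime import datetime, timedelta

def chunk_date_range(start, end, max_days=365):
    """Yield (start, end) ISO-date pairs covering the range, none longer than max_days."""
    start_date = datetime.strptime(start, '%Y-%m-%d')
    final_end_date = datetime.strptime(end, '%Y-%m-%d')
    while (final_end_date - start_date).days > max_days:
        window_end = start_date + timedelta(days=max_days)
        yield start_date.strftime('%Y-%m-%d'), window_end.strftime('%Y-%m-%d')
        start_date = window_end + timedelta(days=1)
    yield start_date.strftime('%Y-%m-%d'), final_end_date.strftime('%Y-%m-%d')

print(list(chunk_date_range('2017-01-01', '2019-06-30')))
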
Example #5
def create_ros_package(type):
    
	is_catkin=True
	if type == 'ros-legacy':
		is_catkin=False
	(package, depends) = utils.parse_arguments([], is_catkin)
			
	p = os.path.abspath(package)
	os.makedirs(p) 
	manifest_depends = ''.join(['  <depend package="%s"/>\n'%d for d in depends])
	cmake_depends = ''.join(['%s '%d for d in depends])
	p = os.path.abspath(package)
	templates = get_ros_text_templates(type)
	for filename, template in templates.iteritems():
		contents = utils.instantiate_template(template, package, package, package, utils.author_name(), manifest_depends, cmake_depends)
		p = os.path.abspath(os.path.join(package, filename))
		# A context manager closes the file even if the write fails.
		with open(p, 'w') as f:
			f.write(contents.encode('utf-8'))
		print "Created package file", p
	if type == 'ros-legacy':
		utils.print_concluding_message(package)
	else:
		utils.print_concluding_catkin_message(package)
Example #6
def create_winros_catkin_package():
    (package, depends) = utils.parse_arguments([])
    if 'roscpp' in depends:
        create_cpp_package_directory(package)
        templates = get_cpp_templates(package)
    elif 'rospy' in depends:
        create_py_package_directory(package)
        templates = get_py_templates(package)
    else:
        create_common_package_directory(package)
        templates = get_common_templates(package)
    build_depends = ''.join(
        ['  <build_depend>%s</build_depend>\n' % d for d in depends])
    run_depends = ''.join(
        ['  <run_depend>%s</run_depend>\n' % d for d in depends])
    cmake_depends = ''.join(['%s ' % d for d in depends])
    for filename, template in templates.iteritems():
        contents = utils.instantiate_template(template, package, package,
                                              utils.author_name(),
                                              build_depends, run_depends,
                                              cmake_depends)
        p = os.path.abspath(os.path.join(package, filename))
        # A context manager closes the file even if the write fails.
        with open(p, 'w') as f:
            f.write(contents.encode('utf-8'))
        print "Created package file", p
    print "\nPlease edit package.xml, mainpage.dox and CMakeLists.txt to finish creating your package."
Example #7
def main(arguments):
    """Main run code for processing embeddings."""

    # Parse the arguments
    args = utils.parse_arguments(arguments)

    # load the embeddings data object
    path_to_embeddings = args.infile + '.embedded.pkl'
    embeddings, _ = utils.load_data(path_to_embeddings, 'dict')
    tf.logging.log(
        tf.logging.INFO,
        '{} lines in embeddings: {}'.format(len(embeddings.keys()),
                                            path_to_embeddings))

    all_embeddings = []
    with open(path_to_embeddings + '_unique_strings.csv', 'wb') as outfile:
        for k, v in embeddings.items():
            output_line = v['line'].encode('utf-8')
            if args.pairs:
                output_line_response = v['response'].encode('utf-8')
                outfile.write(output_line + args.delimiter.encode('utf-8') +
                              output_line_response + b'\n')
            else:
                outfile.write(output_line + b'\n')
            all_embeddings.append(v['line_embedding'])

    # Convert to a numpy array
    all_embeddings_np = np.array([np.array(xi) for xi in all_embeddings])
    array_outfile = path_to_embeddings + '_unique_strings_embeddings.txt'
    np.savetxt(array_outfile, all_embeddings_np)

    # Log the embedding shape
    tf.logging.log(tf.logging.INFO,
                   'Embeddings shape {}'.format(all_embeddings_np.shape))
Example #8
def main(arguments):
    """Main run function to process the pairs data."""

    # Parse the arguments
    args = utils.parse_arguments(arguments)

    tf.logging.info('Select and save {} random pairs...'.format(
        args.num_lines))
    tf.logging.info('Input file: {}'.format(args.infile))

    # load the lines
    lines, _ = utils.load_data(args.infile, dest_type='list', delimiter='\n')
    tf.logging.info("Loaded {} lines: {}".format(len(lines), args.infile))

    with open(args.outfile, 'w', encoding='iso-8859-1') as outputfile:
        writer = csv.writer(outputfile, delimiter=args.delimiter)
        collected_pairs = utils.extract_pairs_from_lines(lines)
        random_idxs = np.random.choice(len(collected_pairs),
                                       args.num_lines,
                                       replace=False)
        for random_id in random_idxs:
            pair = collected_pairs[random_id]
            writer.writerow(pair)

    tf.logging.info('Wrote {} pairs to {}.'.format(args.num_lines,
                                                   args.outfile))
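Note: the snippet above assumes utils.extract_pairs_from_lines turns a flat list of lines into (input, response) pairs. One plausible, purely illustrative reading is that consecutive lines form a pair:

# Illustrative guess at the helper's behaviour, not the project's implementation.
def extract_pairs_from_lines(lines):
    """Pair each line with the line that follows it, as (input, response)."""
    return [(lines[i], lines[i + 1]) for i in range(len(lines) - 1)]

print(extract_pairs_from_lines(['hi', 'hello', 'how are you?']))
# [('hi', 'hello'), ('hello', 'how are you?')]
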
  def __init__(self):
    self.args = utils.parse_arguments()

    chrome_application_path = utils.get_application_path()
    chrome_application_directory = os.path.dirname(chrome_application_path)
    self.ipc_fuzzer_binary = utils.get_fuzzer_application_name()
    self.ipc_fuzzer_binary_path = os.path.join(
        chrome_application_directory, self.ipc_fuzzer_binary)
Example #10
    def __init__(self):
        self.args = utils.parse_arguments()

        chrome_application_path = utils.get_application_path()
        chrome_application_directory = os.path.dirname(chrome_application_path)
        self.ipc_fuzzer_binary = utils.get_fuzzer_application_name()
        self.ipc_fuzzer_binary_path = os.path.join(
            chrome_application_directory, self.ipc_fuzzer_binary)
Example #11
File: train.py  Project: KEHANG/vqa
def main():

    args = parse_arguments()
    image_model_name = args.image_model_name
    embedding_type = args.embedding_type
    embedding_dim = args.embedding_dim
    batch_size = args.batch_size
    train(image_model_name, embedding_type, embedding_dim, batch_size)
Example #12
def update_event(em, user_id, msg, all_ids, all_names, user_email='', user_name='', create=False):
    '''
    Create an event from the message and add it to the event manager.
    '''
    arg_dict = parse_arguments(msg)
 
    if create:  
        event = Event(user_id, user_email, user_name) 
    else:
        event = em.find_event(user_id)
 
    if '-guests' in arg_dict:  
        guests = parse_mentions(arg_dict['-guests']) 

        # check if valid guests        
        for guest in guests:
            if guest not in all_ids: 
                print("Error invalid guests")
                return em, False 

        # find relevant users' id, email, name
        ids = [] 
        names = []
        for id, name in zip(all_ids, all_names):
            if id in guests:
                ids.append(id)
                names.append(name) 

        # get guest ids and names
        event.set_guests(ids)  
        #event.set_guest_emails(emails)  
        event.set_guest_names(names)  

    if '-title' in arg_dict: 
        event.set_title(arg_dict['-title'])
    
    if '-date' in arg_dict:     
        event.set_date(arg_dict['-date'])

    if '-time' in arg_dict: 
        event.set_time(arg_dict['-time'])

    if '-len' in arg_dict:
        event.set_length(arg_dict['-len'])

    if '-loc' in arg_dict:
        event.set_loc(arg_dict['-loc'])

    if '-alert' in arg_dict:
        event.set_alert(arg_dict['-alert'])

    if '-desc' in arg_dict:
        event.set_desc(arg_dict['-desc'])
   
    em.add_event(user_id, event)  
    return em, True   
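Note: update_event relies on parse_arguments(msg) turning a chat command such as '-title Standup -date 2021-03-01' into a flag-to-value dict, with parse_mentions pulling user ids out of the '-guests' value. A hedged, regex-based sketch of the flag parsing (hypothetical, not the bot's actual implementation):

import re

def parse_arguments(msg):
    """Split '-flag value ...' text into {'-flag': 'value ...'}."""
    arg_dict = {}
    # Find each '-flag' token and capture everything up to the next flag.
    for match in re.finditer(r'(-\w+)\s+(.*?)(?=\s+-\w+|$)', msg):
        arg_dict[match.group(1)] = match.group(2).strip()
    return arg_dict

print(parse_arguments('create -title Standup -date 2021-03-01 -time 09:00'))
# {'-title': 'Standup', '-date': '2021-03-01', '-time': '09:00'}
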
Example #13
  def __init__(self):
    self.args = utils.parse_arguments()

    self.ipc_message_util_binary = utils.application_name_for_platform(
        IPC_MESSAGE_UTIL_APPLICATION)
    self.ipc_fuzzer_binary = utils.get_fuzzer_application_name()
    self.ipc_message_util_binary_path = utils.get_application_path(
        self.ipc_message_util_binary)
    self.ipc_fuzzer_binary_path = utils.get_application_path(
        self.ipc_fuzzer_binary)
Example #15
def main():

    args = parse_arguments()
    weights_file = args.weights_file[0]
    image_model_name = args.image_model_name
    embedding_type = args.embedding_type
    embedding_dim = args.embedding_dim
    batch_size = args.batch_size
    evaluate(weights_file, image_model_name, embedding_type, embedding_dim,
             batch_size)
Example #16
def main():
    args = parse_arguments()
    setup_spark()

    df_u, df_i, df_r = read_file(args)
    print('='*100 + args.users_path)
    df_u.show()
    df_u.printSchema()
    df_u.select('Age').summary().show()
    df_u.createOrReplaceTempView("users")
    df2 = sc.sql("select count(*) from (select Age, count(*) from users group by Age) as a")
    df2.show()
    print('='*100 + args.users_path)
    df_i.show()
Example #17
def main(arguments):
    """Main run function for interacting with the model."""

    # Parse the arguments
    args = utils.parse_arguments(arguments)

    tf.logging.info('Loading unique strings.')

    data_path = args.infile
    unique_strings_path = data_path + '.embedded.pkl_unique_strings.csv'
    # load the unique lines
    with open(unique_strings_path) as f:
        unique_strings = [line.rstrip() for line in f]

    tf.logging.info('Loaded {} unique strings'.format(len(unique_strings)))

    # define the path of the nearest neighbor model to use
    annoy_index_path = data_path + '.ann'

    # Load generative models from pickles to generate from scratch.
    try:
        tf.logging.info('Build generative model...')
        gen_model_use = utils.GenModelUSE(
            annoy_index_path=annoy_index_path,
            unique_strings=unique_strings,
            module_path=args.module_path,
            use_sentence_piece=args.use_sentence_piece)
        tf.logging.info('Generative model built.')
    except (OSError, IOError) as e:
        tf.logging.error(e)
        tf.logging.info('Error building generative model.')

    # build a loop for interactive mode
    while True:
        # get user input
        user_input = input('\nQuery Text: ')
        # if user input is too short
        if len(user_input) < 1:
            continue
        nns, distances = gen_model_use.inference(
            user_input,
            num_neighbors=args.num_neighbors,
            use_sentence_piece=args.use_sentence_piece)

        # print all the returned responses, and distance to input
        for nn, distance in zip(nns, distances):
            print('d: {}, {}'.format(distance,
                                     unique_strings[nn].split(args.delimiter)))
Example #18
def main():
    args = parse_arguments()
    algo = args.algo
    env = gym.make("TwoLinkArm-v0")

    if (algo == "lqr"):
        plot_path = os.path.join(os.getcwd(), "lqr_plots")
        make_dir(plot_path)
        x0, u_seq = run_lqr(env, plot_path)

    elif (algo == "ilqr"):
        plot_path = os.path.join(os.getcwd(), "ilqr_plots")
        make_dir(plot_path)
        run_ilqr(env, plot_path)

    else:
        raise ValueError("algo could only be \"lqr\" or \"ilqr\" ")
Example #19
def main(arguments):
    """Main run function for interacting with the model."""

    # Parse the arguments
    args = utils.parse_arguments(arguments)

    tf.logging.info('Loading unique strings.')

    data_path = args.infile
    unique_strings_path = data_path + '.embedded.pkl_unique_strings.csv'
    # load the unique lines
    with open(unique_strings_path) as f:
        unique_strings = [line.rstrip() for line in f]

    tf.logging.info('Loaded {} unique strings'.format(len(unique_strings)))

    # define the path of the nearest neighbor model to use
    annoy_index_path = data_path + '.ann'

    # Load generative models from pickles to generate from scratch.
    try:
        tf.logging.info('Build generative model...')
        gen_model_use = utils.GenModelUSE(
            annoy_index_path=annoy_index_path,
            unique_strings=unique_strings,
            module_path=args.module_path,
            use_sentence_piece=args.use_sentence_piece)
        tf.logging.info('Generative model built.')
    except (OSError, IOError) as e:
        tf.logging.info('Error building generative model.')

    # build a loop for interactive mode
    while True:
        # get user input
        user_input = input('\nQuery Text: ')
        # if user input is too short
        if len(user_input) < 1:
            continue
        resp = gen_model_use.inference(user_input, num_neighbors=1,
                                       args=args).split(args.delimiter)
        # respond with the response half of the matched [input, response] pair
        tf.logging.info('Closest matched root: {}'.format(resp[0]))
        if len(resp) > 1:
            tf.logging.info('Response: {}'.format(resp[1]))
Example #20
def main():
    try:
        args = parse_arguments()
        main_logger.info(args)
        if args.mode == SCAN:
            run_scan(args)
        elif args.mode == REPORTS:
            get_reports(args)
        elif args.mode == DEPCHECK:
            depcheck(args)
    except Exception as e:
        main_logger.info(e)
        try:
            run_subprocess(f"docker network rm {NETWORK_SCAN}")
        except Exception as _:
            main_logger.warning(f"Error removing {NETWORK_SCAN}")
        try:
            run_subprocess(f"docker volume rm {VOL_SCAN}")
        except Exception as _:
            main_logger.warning(f"Error removing {VOL_SCAN}")
Example #21
def create_winros_catkin_package():
    (package, depends) = utils.parse_arguments(['std_msgs'])
    create_package_directory(package)
    templates = get_templates(package)
    build_depends = ''.join(
        ['  <build_depend>%s</build_depend>\n' % d for d in depends])
    run_depends = ''.join(
        ['  <run_depend>%s</run_depend>\n' % d for d in depends])
    cmake_depends = ''.join(['%s ' % d for d in depends])
    for filename, template in templates.iteritems():
        contents = utils.instantiate_template(template, package, package,
                                              utils.author_name(),
                                              build_depends, run_depends,
                                              cmake_depends)
        p = os.path.abspath(os.path.join(package, filename))
        # A context manager closes the file even if the write fails.
        with open(p, 'w') as f:
            f.write(contents.encode('utf-8'))
        print "Created package file", p
    print "\nPlease edit package.xml, mainpage.dox, CMakeLists.txt, and add the package subdirectory."
Example #22
def create_qt_ros_package(type):
	is_catkin=True
	if type == 'qt-ros-legacy':
		is_catkin=False
	#print "Create qt_ros_package(is_catkin:%r)" % is_catkin
	(package, depends) = utils.parse_arguments(['qt_build','roscpp'], is_catkin)
			
	# Make directories
	p = os.path.abspath(package)
	os.makedirs(os.path.join(p,"src"))
	os.makedirs(os.path.join(p,"include"))
	os.makedirs(os.path.join(p,"include",package))
	os.makedirs(os.path.join(p,"resources"))
	os.makedirs(os.path.join(p,"resources","images"))
	os.makedirs(os.path.join(p,"ui"))
	print "Created qt package directories."

	# Qt text files
	manifest_depends = ''.join(['  <depend package="%s"/>\n'%d for d in depends])
	cmake_depends = ''.join(['%s '%d for d in depends])
	templates = get_qt_text_templates(package, type)
	for filename, template in templates.iteritems():
		contents = utils.instantiate_template(template, package, package, package, utils.author_name(), manifest_depends, cmake_depends)
		p = os.path.abspath(os.path.join(package, filename))
		# A context manager closes the file even if the write fails.
		with open(p, 'w') as f:
			f.write(contents.encode('utf-8'))
		print "Created package file", p
	# Qt binary files
	template_dir = os.path.join(os.path.dirname(__file__),'templates',type) 
	shutil.copy(os.path.join(template_dir,'resources','images','icon.png'),
				os.path.join(os.path.abspath(package),'resources','images','icon.png'))
	if type == 'qt-ros-legacy':
		utils.print_concluding_message(package)
	else:
		utils.print_concluding_catkin_message(package)
Example #23
def main():
    args = utils.parse_arguments()
    device = torch.device(args.device if torch.cuda.is_available() else "cpu")

    print(device)

    with np.printoptions(precision=4, suppress=True):
        if args.experiment == 'mlp':
            train_acc, val_acc, test_acc = run_mlp_experiment(args, device)
        elif args.experiment == 'vgg':
            train_acc, val_acc, test_acc = run_vgg_experiment(args, device)
        else:
            raise NotImplementedError(
                'experiment must be mlp or vgg, but %s was given' %
                args.experiment)

    print('Final validation accuracy: %f' % val_acc[-1])
    print('Job finished')

    if args.save_results:
        np.save(args.results_filename + '_train_acc', train_acc)
        np.save(args.results_filename + '_val_acc', val_acc)
        np.save(args.results_filename + '_test_acc', test_acc)
Example #24
def main(arguments):
    """Main run function for indexing the embeddings."""

    # Parse the arguments
    args = utils.parse_arguments(arguments)

    unique_strings_path = args.infile + '.embedded.pkl_unique_strings.csv'

    # Load the unique lines
    with open(unique_strings_path) as f:
        unique_strings = [line.rstrip() for line in f]

    unique_embeddings_path = (args.infile +
                              '.embedded.pkl_unique_strings_embeddings.txt')
    # Load the unique embeddings
    with open(unique_embeddings_path) as f:
        unique_embeddings = [[float(x) for x in line.strip().split()]
                             for line in f]

    tf.logging.info(
        'Loaded {} unique strings, {} embeddings of dimension {}'.format(
            len(unique_strings), len(unique_embeddings),
            len(unique_embeddings[0])))

    # Length of item vector that will be indexed
    nn_forest = AnnoyIndex(512)

    for i in range(len(unique_strings)):
        v = unique_embeddings[i]
        nn_forest.add_item(i, v)

    # Build an approximate nearest neighbor forest with num_trees
    nn_forest.build(int(args.num_trees))
    output_path = args.infile + '.ann'
    nn_forest.save(output_path)

    tf.logging.info('Index forest built {}'.format(output_path))
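Note: once the forest above is saved, the interactive examples elsewhere in this listing query it for nearest neighbours. A small usage sketch (the index path is a hypothetical stand-in for args.infile + '.ann'):

from annoy import AnnoyIndex

# Dimension and metric must match the build; the script above used the default angular metric.
index = AnnoyIndex(512, 'angular')
index.load('corpus.txt.ann')
neighbor_ids, distances = index.get_nns_by_item(0, 10, include_distances=True)
print(neighbor_ids, distances)
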
Example #25
def create_comms_package(type):
    
	(package, depends) = utils.parse_arguments(['std_msgs'])
	
	# Make directories
	p = os.path.abspath(package)
	os.makedirs(os.path.join(p,"msg"))
	os.makedirs(os.path.join(p,"srv"))
	print "Created package directories."

	# Text files
	manifest_depends = ''.join(['  <depend package="%s"/>\n'%d for d in depends])
	cmake_depends = ''.join(['%s '%d for d in depends])
	templates = get_text_templates(package, type)
	for filename, template in templates.iteritems():
		contents = utils.instantiate_template(template, package, package, package, utils.author_name(), manifest_depends, cmake_depends)
		p = os.path.abspath(os.path.join(package, filename))
		# A context manager closes the file even if the write fails.
		with open(p, 'w') as f:
			f.write(contents.encode('utf-8'))
		print "Created package file", p
	utils.print_concluding_catkin_message(package)
Example #26
def create_validation():

    opt = parse_arguments()
    print(opt)
    root = opt.root
    save_dir = root + 'val_dataset_peragent/'
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    else:
        print("Data folder exists! Continue?")
        pdb.set_trace()
    loader = features_loader(root=root,
                             probs=opt.probs,
                             norm=opt.norm,
                             ours=opt.ours,
                             partition='train/')
    val_z, val_images_indexes_sender, val_images_indexes_receiver = \
                            create_val_batch(opt, loader)
    suffix = 'seed%d_same%d' % (opt.manualSeed, opt.same)
    pickle.dump(val_z, open(save_dir + "val_z" + suffix, "wb"))
    pickle.dump(val_images_indexes_sender,
                open(save_dir + "val_images_indexes_sender" + suffix, "wb"))
    pickle.dump(val_images_indexes_receiver,
                open(save_dir + "val_images_indexes_receiver" + suffix, "wb"))
Example #27
        best_loss = min(val_loss, best_loss)

        # Save checkpoint
        model_filename = 'checkpoint_%03d.pth.tar' % epoch
        checkpoint = {
            'epoch': epoch,
            'model': generator.state_dict(),
            'discriminator': discriminator.state_dict(),
            'gen_optimizer': gen_optimizer.state_dict(),
            'disc_optimizer': disc_optimizer.state_dict(),
            'best_loss': best_loss
        }
        save_checkpoint(checkpoint, model_filename, is_best, save_path)

    # Evaluate model using PSNR and SSIM metrics
    evaluate(args, generator, val_loader)


def save_checkpoint(checkpoint, filename, is_best, save_path):
    print("===> Saving checkpoint '{}'".format(filename))
    model_filename = save_path / filename
    best_filename = save_path / 'model_best.pth.tar'
    torch.save(checkpoint, model_filename)
    if is_best:
        shutil.copyfile(model_filename, best_filename)
    print("===> Saved checkpoint '{}'".format(model_filename))


if __name__ == '__main__':
    main(parse_arguments(argv[1] if len(argv) >= 2 else "run_configs/default_gan.ini"))
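Note: the checkpoint dict written by save_checkpoint above can be restored later. A hedged usage sketch (the path and target device are assumptions for illustration):

import torch

# Load the best checkpoint saved above.
checkpoint = torch.load('model_best.pth.tar', map_location='cpu')
print('Resuming from epoch', checkpoint['epoch'], 'with best loss', checkpoint['best_loss'])
# generator.load_state_dict(checkpoint['model'])
# discriminator.load_state_dict(checkpoint['discriminator'])
# gen_optimizer.load_state_dict(checkpoint['gen_optimizer'])
# disc_optimizer.load_state_dict(checkpoint['disc_optimizer'])
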
    # Test case: GET /spendingauthority with invalid authority onids
    def test_get_non_authority_onids(self, endpoint='/spendingauthority'):
        for onid in self.test_cases['invalid_authority_onids']:
            params = {'onid': onid}
            response = utils.make_request(self, endpoint, 200, params=params)
            spending_schema = []
            self.assertEqual(response.json()['data'], spending_schema)

    # Test case: GET /spendingauthority with bad request
    def test_bad_request_response(self, endpoint='/spendingauthority'):
        bad_params = [{'onid': ''}, {}]
        for param in bad_params:
            response = utils.make_request(self, endpoint, 400, params=param)
            error_schema = utils.get_resource_schema(self, 'Error')
            utils.check_schema(self, response, error_schema)


if __name__ == '__main__':
    arguments, argv = utils.parse_arguments()

    # Setup logging level
    if arguments.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    integration_tests.setup(arguments.config_path, arguments.openapi_path)
    unittest.main(argv=argv)
    integration_tests.cleanup()
Example #29

def output(last_matches):
    music_cluster_list = []
    for idx, cluster in enumerate(last_matches):
        for music in cluster:
            music_cluster_list.append((music, idx))
    music_cluster_list.sort()
    for t in music_cluster_list:
        print t[0], t[1]


if __name__ == '__main__':
    usage_text = 'Usage: %prog -i input_file -k num_of_clusters [-c centroids_file]'
    parser = OptionParser(usage=usage_text)
    (options, args) = parse_arguments(parser)
    file_name = options.input
    k = options.k

    tags = get_set_of_tags_from_file(file_name)
    list_of_all_tags = sorted(tags)
    musics = get_musics_from_file(file_name, list_of_all_tags)

    if options.debug:
        print 'Numbers of tags:', len(list_of_all_tags)

    if options.centroids_file:
        centroids = get_centroids_from_file(musics, k, options.centroids_file)
    else:
        # centroids = random_centroids(k)
        centroids = forgy_initialization(musics, k)
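Note: forgy_initialization is not shown above; the standard Forgy scheme simply takes k distinct data points at random as the initial centroids, which a short sketch (assumed to match this project's intent) captures:

import random

def forgy_initialization(points, k):
    """Forgy initialisation: choose k distinct data points as the starting centroids."""
    return random.sample(points, k)
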
def main():
    """Runs queries to create training and prediction tables from clean data."""

    # Load config shared by all steps of feature creation.
    config_path = utils.parse_arguments(sys.argv).config_path
    config = utils.read_config(config_path)
    # Project-wide config.
    global_config = config['global']
    # Path to SQL files.
    queries_path = config['file_paths']['queries']
    # SQL files for different pipeline steps.
    query_files = config['query_files']
    # Parameters unique to individual pipeline steps.
    query_params = config['query_params']

    # Create the dataset to hold data for the pipeline run.
    utils.create_dataset(
        destination_project=global_config['destination_project_id'],
        destination_dataset=global_config['destination_dataset'])

    # Query to remove nulls from the target column (company_response_to_consumer)
    # and from complaint_narrative column.
    remove_nulls_params = utils.merge_dicts(global_config,
                                            query_params['remove_nulls'])

    utils.create_table(
        query_path=os.path.join(queries_path, query_files['remove_nulls']),
        query_params=remove_nulls_params,
        destination_project=global_config['destination_project_id'],
        destination_dataset=global_config['destination_dataset'],
        destination_table=global_config['nulls_removed_table'],
        partition_field=None)

    # Query to cleanup the categories of issue, subissue, product, subproduct.
    utils.create_table(
        query_path=os.path.join(queries_path, query_files['clean_categories']),
        query_params=global_config,
        destination_project=global_config['destination_project_id'],
        destination_dataset=global_config['destination_dataset'],
        destination_table=global_config['cleaned_features_table'],
        partition_field=None)

    # Query to merge the cleaned features and the table with nulls removed.
    utils.create_table(
        query_path=os.path.join(queries_path, query_files['combine_tables']),
        query_params=global_config,
        destination_project=global_config['destination_project_id'],
        destination_dataset=global_config['destination_dataset'],
        destination_table=global_config['clean_table'],
        partition_field=None)

    # Query to split the clean dataset into training and prediction datasets.
    # The training dataset will be fed to the AutoML Tables for training and
    # the prediction dataset will be used for batch prediction.
    features_split_params = utils.merge_dicts(
        global_config, query_params['train_predict_split'])

    utils.create_table(
        query_path=os.path.join(queries_path,
                                query_files['train_predict_split']),
        query_params=features_split_params,
        destination_project=global_config['destination_project_id'],
        destination_dataset=global_config['destination_dataset'],
        destination_table=global_config['train_predict_split'],
        partition_field=None)

    # Query to create the prediction table.
    features_split_params = utils.merge_dicts(
        global_config, query_params['train_predict_split'])

    utils.create_table(
        query_path=os.path.join(queries_path,
                                query_files['prediction_features']),
        query_params=features_split_params,
        destination_project=global_config['destination_project_id'],
        destination_dataset=global_config['destination_dataset'],
        destination_table=global_config['features_predict_table'],
        partition_field=None)

    # Query to create the training table along with the manual split into train,
    # validation and test rows for the AutoML tables.
    features_split_params = utils.merge_dicts(
        global_config, query_params['train_predict_split'])

    utils.create_table(
        query_path=os.path.join(queries_path,
                                query_files['training_features']),
        query_params=features_split_params,
        destination_project=global_config['destination_project_id'],
        destination_dataset=global_config['destination_dataset'],
        destination_table=global_config['features_train_table'],
        partition_field=None)
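Note: utils.merge_dicts above combines the project-wide config with per-step query parameters. A minimal sketch of such a helper, assuming later dictionaries override earlier keys:

def merge_dicts(*dicts):
    """Merge dictionaries left to right; keys in later dicts win."""
    merged = {}
    for d in dicts:
        merged.update(d)
    return merged

# merge_dicts(global_config, query_params['remove_nulls']) would then let a step-specific
# parameter shadow a project-wide default of the same name.
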
Example #31
File: psycam.py  Project: JoBergs/psycam
    """ Gather all parameters (source image, layer descriptor and octave),
    create a net and start to dream. """

    source_path = get_source_image(args)
    layer = get_layer_descriptor(args)
    octave = (args.octaves if args.octaves else randint(1, 9))

    model_file = '../caffe/models/bvlc_googlenet/bvlc_googlenet.caffemodel'

    if args.network:
        create_net(model_file)
    net = load_net(model_file)

    psycam = PsyCam(net=net)
    psycam.iterated_dream(source_path=source_path,
                          end=layer, octaves=octave)

if __name__ == "__main__":
    try:
        args = parse_arguments(sys.argv[1:])
        while True:
            start_dream(args)

            if not args.continually:
                break

    except Exception as e:
        import traceback
        print(traceback.format_exc())
        print('Quitting PsyCam')
Example #32
  def __init__(self):
    self.args = utils.parse_arguments()

    self.ipc_fuzzer_binary = utils.get_fuzzer_application_name()
    self.ipc_fuzzer_binary_path = utils.get_application_path(
        self.ipc_fuzzer_binary)
if __name__ == '__main__':
    import datetime as dt
    import multiprocessing as mp
    from utils import parse_arguments
    from InterventionsMIP import load_config_file, logger, change_paths

    # Parse arguments
    args = parse_arguments()
    # Load config file
    load_config_file(args.f_config)
    # Adjust paths
    change_paths(args)

    from instances import load_instance, load_tiers, load_seeds
    from policy_search_functions import policy_search, capacity_policy_search
    from objective_functions import multi_tier_objective, multi_tier_objective_ACS
    from policies import MultiTierPolicy as MTP
    from policies import MultiTierPolicy_ACS as MTP_ACS

    # Parse city and get corresponding instance
    instance = load_instance(args.city,
                             setup_file_name=args.f,
                             transmission_file_name=args.tr,
                             hospitalization_file_name=args.hos)
    train_seeds, test_seeds = load_seeds(args.city, args.seed)
    tiers = load_tiers(args.city, tier_file_name=args.t)

    # TODO Read command line args for n_proc for better integration with crunch
    n_proc = args.n_proc

    # TODO: pull out n_replicas_train and n_replicas_test to a config file
Example #34
import numpy as np
from datetime import datetime
from decouple import config
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.feature_extraction.text import TfidfVectorizer
from hdfs.ext.kerberos import KerberosClient

from utils import (
    clean_text,
    get_keys,
    parse_arguments,
)
from models import (OneVsRestLogisticRegression)
from queries import (get_train_data, get_list_of_classes)

options = parse_arguments()

URL_ORACLE_SERVER = options['oracle_server']
USER_ORACLE = options['oracle_user']
PASSWD_ORACLE = options['oracle_password']
ORACLE_DRIVER_PATH = options['oracle_driver_path']
HDFS_URL = options['hdfs_url']
HDFS_USER = options['hdfs_user']
HDFS_MODEL_DIR = options['hdfs_model_dir']
START_DATE = options['start_date']
END_DATE = options['end_date']

NEGATIVE_CLASS_VALUE = 13
ID_COLUMN = 'SNCA_DK'
TEXT_COLUMN = 'SNCA_DS_FATO'
LABEL_COLUMN = 'DMDE_MDEC_DK'
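Note: unlike the attribute-style args objects elsewhere in this listing, this snippet indexes options like a dictionary, which suggests parse_arguments here returns something like vars(parser.parse_args()). A hedged sketch with option names inferred from the keys used above:

import argparse

# Assumed shape only: option names mirror the dictionary keys accessed above.
def parse_arguments():
    parser = argparse.ArgumentParser()
    for flag in ('oracle-server', 'oracle-user', 'oracle-password', 'oracle-driver-path',
                 'hdfs-url', 'hdfs-user', 'hdfs-model-dir', 'start-date', 'end-date'):
        parser.add_argument('--' + flag, dest=flag.replace('-', '_'), required=True)
    return vars(parser.parse_args())
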
Example #35
def main():
    logfile = "{0}/log/sentence_segment.log".format(PROJECT_DIR)
    utils.setup_logging(logfile, logging.INFO)
    logging.info(sys.path)

    args = utils.parse_arguments()
    batchsize = args.batchsize

    start_time = time.time()

    # set_env_vars()

    spark_session = spark.create_spark_session()

    es = elastic.check_elasticsearch()
    es_write_conf = spark.broadcast_es_write_config(spark_session)

    s3resource = aws.create_s3_resource()
    keys = aws.get_list_s3_files(s3resource,
                                 filetype=TEXT_FOLDER,
                                 numrows=batchsize)

    pbooks = s3_to_rdd(spark_session, keys)
    # testing_rdd = spark.sparkContext.wholeTextFiles("s3a://jason-b/{0}".format(TEXT_FOLDER), minPartitions=6, use_unicode=False)
    # spark.log_rdd(pbooks)

    pipeline = spark_nlp.setup_pipeline()
    books = spark_nlp.segment_sentences(spark_session, pbooks, pipeline)

    # Go from one book per row to one sentence per row
    sentences = books.select(
        func.monotonically_increasing_id().alias("sentence_id"),
        func.col("fileName"),
        func.posexplode("sentence.result").alias("position", "sentenceText"),
        func.size("sentence.result").alias("numSentencesInBook"),
    )
    # logging.info("Num Sentences: {0}".format(sentences.count()))

    sentences = spark_nlp.tokenize_sentences(sentences)

    count_syllables_udf = func.udf(
        lambda s: _udf_sentence_count_syllables_sentence(s),
        ArrayType(IntegerType()))
    count_sentence_multisyllables_udf = func.udf(
        lambda s: sum(_udf_sentence_count_syllables_sentence(s)),
        IntegerType())
    count_array_multisyllables_udf = func.udf(
        lambda a: sum(_udf_array_count_syllables_sentence(a)), IntegerType())
    sentences = sentences.select(
        "sentence_id",
        "fileName",
        "position",
        "sentenceText",
        "numSentencesInBook",
        # count_sentence_multisyllables_udf('sentenceText').alias("multiSyllableCount")
        count_array_multisyllables_udf("words").alias("multiSyllableCount"),
        func.size("words").alias("numWordsInSentence"),
    )
    sentences.printSchema()

    # pipeline = spark_nlp.setup_sentiment_pipeline()
    # output = spark_nlp.sentiment_analysis(sentence_data, pipeline)

    # Format to load ElasticSearch
    sentences = sentences.select(
        "sentence_id",
        func.to_json(
            func.struct("sentence_id", "fileName", "position", "sentenceText",
                        "numSentencesInBook", "multiSyllableCount",
                        "numWordsInSentence")).alias("value"))
    # sentence = output.select(["sentence_id", "sentence"]).toJSON()
    sentences = sentences.rdd.map(lambda x: elastic.format_data(x))

    # Write to ES
    write_rdd_to_es(sentences, es_write_conf)
    """
    # _read_es()
    # sentences = sentence.rdd.map(lambda s: s.sentence[0].result)
    # sentences = sentence.rdd.flatMap(lambda s: s.sentence)
    # results = sentence.rdd.map(lambda s: s.result).zipWithUniqueId()

    """
    spark_session.stop()
    end_time = time.time()
    logging.info("RUNTIME: {0}".format(end_time - start_time))
Example #36
  def parse_arguments(self):
    self.args = utils.parse_arguments()
Example #37
def run():
    args = utils.parse_arguments()
    logger.info('Hitting main...')
    main(args)
Example #38
def handle_command(slack_client, sender, command, channel):
    """
        Executes bot command if the command is known
    """
    # Default response is help text for the user
    global em
    default_response = "Sorry .. *Sergio* and *Taeyeong* haven't taught me how to respond to your command .. "

    # Finds and executes the given command, filling in response
    response = ""

    # get info about users in channel
    users_list = slack_client.api_call("users.list", token=AUTH_TOKEN)
    members = users_list["members"]
    all_ids = [member["id"] for member in members]  # extract only names
    all_names = [member["real_name"]
                 for member in members]  # extract only names
    sender_email = [
        member["profile"]["email"] for member in members
        if member['id'] == sender
    ][0]
    sender_name = [
        member["real_name"] for member in members if member['id'] == sender
    ][0]

    # This is where you start to implement more commands!
    if command.startswith(CREATE_COMMAND):

        #### error handling needed for invalid guests
        em, success = update_event(em,
                                   sender,
                                   command,
                                   all_ids,
                                   all_names,
                                   user_email=sender_email,
                                   user_name=sender_name,
                                   create=True)
        event = em.dict[sender]

        # message
        response += "Creating this event:"
        response += '\n' + event.export_str()
        response += "\n\nIf you want to add more info, please use `{}` command, or if you want to push to your Google Calendar account, use `{}` command.".format(
            ADD_COMMAND, PUSH_COMMAND)
    elif command.startswith(ADD_COMMAND):
        event = em.find_event(sender)
        if event == None:
            response += "You haven't created any event yet. Use `{}` command first!".format(
                CREATE_COMMAND)
        else:
            users_list = slack_client.api_call("users.list", token=AUTH_TOKEN)
            members = users_list["members"]
            all_ids = [member["id"]
                       for member in members]  # extract only names
            all_names = [member["name"]
                         for member in members]  # extract only names
            em, success = update_event(em,
                                       sender,
                                       command,
                                       all_ids,
                                       all_names,
                                       create=False)
            event = em.find_event(sender)

            response += '\n' + event.export_str()
            response += "\nIf you want to add more info, please use `{}` command.\nIf you want to push to your Google Calendar account, use `{}` command".format(
                ADD_COMMAND, PUSH_COMMAND)

    elif command.startswith(MODIFY_COMMAND):
        items = command.split()
        if len(items) < 4:
            response += "You must enter `{}` command, event ID, field to change, and its new value".format(
                MODIFY_COMMAND)
        else:
            arg_to_change = parse_arguments(command)
            em, success = update_event(em,
                                       sender,
                                       command,
                                       all_ids,
                                       all_names,
                                       user_email=sender_email,
                                       user_name=sender_name,
                                       create=True)
            editEvent(google_service, sender_email, items[1], arg_to_change,
                      em.find_event(sender))
            response += "Event updated."

    elif command.startswith(PUSH_COMMAND):
        event = em.find_event(sender)
        if event == None:
            response += "You haven't created any event yet. Use `{}` command first!".format(
                CREATE_COMMAND)
        else:
            event_id = createEvent(google_service, event.title, event.loc,
                                   event.desc, event.time, event.endtime,
                                   event.host_email, event.guest_emails,
                                   event.alert)
            em.delete_event(sender)
            response += "Your event has been created! The event ID is *{}* you can use to `{}` or `{}`".format(
                event_id, MODIFY_COMMAND, DELETE_COMMAND)
            response += "\nNow, you don't have any event to `{}`.".format(
                PUSH_COMMAND)

    elif command.startswith(LIST_COMMAND):
        event = em.find_event(sender)
        if event == None:
            response += "You haven't created any event yet. Use `{}` command first!".format(
                CREATE_COMMAND)
        else:
            response += '\n' + event.export_str()
            response += "\n\nIf you want to add more info, please use `{}` command, or if you want to push to Google Calendar, use `{}` command.".format(
                ADD_COMMAND, PUSH_COMMAND)

    elif command.startswith(INIT_COMMAND):
        em.delete_event(sender)
        response += "Now, you don't have any event to `{}`.".format(
            PUSH_COMMAND)

    elif command.startswith(HELP_COMMAND):
        response += show_help()

    elif command.startswith(DELETE_COMMAND):
        items = command.split()
        if len(items) != 2:
            response += "You must enter `{}` command and 1 event ID.".format(
                DELETE_COMMAND)
        else:
            event_id = items[1]
            deleteEvent(google_service, sender_email, event_id)
            response += "Event deleted."
    # Sends the response back to the channel
    slack_client.api_call("chat.postMessage",
                          channel=channel,
                          text=response or default_response)
Example #39
            else:
                conn.sendall(make_message('unknowncommand'))

        conn.close()
        logger.info('Close thread connection...')

    def run(self):
        logger.info('Run server... HOST={} PORT={}'.format(self.host, self.port))
        self.socket.listen(self.MAX_CONNECTIONS)

        while not self.is_stop:
            conn, addr = self.socket.accept()
            logger.info('Connect to {}'.format(addr))
            self.create_thread(conn, addr)

        print(111)

        self.shutdown()

    def shutdown(self):
        for thread in self.threads:
            thread.join()

        self.socket.close()
        logger.info('Server shutdown.')


if __name__ == '__main__':
    args = parse_arguments()
    server = Server(args.host, args.port)
    server.run()
Example #40
    )
)
mutual_exclusive.add_argument(
    '--brian2cuda',
    action='store_true',
    help=(
        "Pass ``test`` string as ``-k`` option to select tests from brian2cuda test "
        "suite."
    )
)

parser.add_argument('--only',
                    choices=['single-run', 'multi-run', 'standalone-only'],
                    default=None)

args = utils.parse_arguments(parser)

import sys, os, pytest
from io import StringIO
import numpy as np

import brian2
import brian2cuda
from brian2.devices.device import reset_device
from brian2.tests import clear_caches, make_argv, PreferencePlugin
from brian2 import prefs
import brian2cuda

all_prefs_combinations = utils.set_preferences(args, prefs)

# target independent preferences
Example #41
    def __init__(self):
        self.args = utils.parse_arguments()

        self.ipc_fuzzer_binary = utils.get_fuzzer_application_name()
        self.ipc_fuzzer_binary_path = utils.get_application_path(
            self.ipc_fuzzer_binary)