Example #1
def main():
    args = argparser.parse_args()
    pdf_doc = PDF_Doc()
    pdf = pdf_doc.get_pdf_doc(args.fromobject)
    # Both branches passed the same destination, so a single call suffices.
    _save_pdf(args.to, pdf_doc.full_name, pdf)
Example #2
def main():
    """TODO: Docstring for main.
    :returns: TODO

    """
    args = parse_args()
    drugs, proteins, relations, ddi, ppi, dpi = read_data(args)
    # print(eval_kfold(drugs, proteins, relations, ddi, ppi, dpi, args))
    print(eval_1fold(drugs, proteins, relations, ddi, ppi, dpi, args))
Example #3
def main(argv=None):
    opt = parse_args(argv)

    tasks = TCGAMeta(download=True, preload=True)
    task = tasks[113]

    # Set up the results DataFrame, resuming from a checkpoint if one exists
    filename = "experiments/results/clinical-tasks.pkl"
    try:
        with open(filename, "rb") as f:
            results = pickle.load(f, encoding='latin1')
        print("Loaded Checkpointed Results")
    except Exception as e:
        print(e)
        results = pd.DataFrame(columns=[
            'task', 'acc_metric', 'model', 'graph', 'trial', 'train_size',
            'time_elapsed'
        ])
        print("Created a New Results Dictionary")

    train_size = 50
    trials = 3
    cuda = True
    exp = []

    for trial in range(trials):
        model = GCN(cuda=cuda,
                    dropout=opt.dropout,
                    num_layer=opt.num_layer,
                    channels=opt.channels,
                    embedding=opt.embedding,
                    aggregation=opt.aggregation,
                    lr=opt.lr,
                    agg_reduce=opt.agg_reduce,
                    seed=trial)
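        # Normalize the samples: subtract the per-feature mean, then scale by the variance.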
        task._samples = task._samples - task._samples.mean(axis=0)
        task._samples = task._samples / task._samples.var()
        X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
            task._samples,
            task._labels,
            stratify=task._labels,
            train_size=train_size,
            test_size=len(task._labels) - train_size)
        adj = sparse.csr_matrix(nx.to_numpy_matrix(GeneManiaGraph().nx_graph))  # to_numpy_matrix requires networkx < 3.0
        model.fit(X_train, y_train, adj=adj)

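        # Predict in chunks of 10 samples to keep memory use bounded.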
        y_hat = []
        for chunk in get_every_n(X_test, 10):
            y_hat.extend(np.argmax(model.predict(chunk), axis=1).numpy())

        exp.append(model.metric(y_test, y_hat))
        print(exp)
    report_results([{
        "name": "acc_metric",
        "type": "objective",
        "value": np.array(exp).mean()
    }])
Example #4
def main():
    """
    Play RPS
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "n:pPh",
                                   ["numgames=", "print", "plot", "help"])
    except getopt.GetoptError as err:
        print(str(err))
        print(usage())
        sys.exit(2)

    ngames = 100
    printg = False
    plot = False
    for opt, arg in opts:
        if opt in ("-n", "--numgames"):
            ngames = int(arg)
        elif opt in ("-p", "--print"):
            printg = True
        elif opt in ("-P", "--plot"):
            plot = True
        elif opt in ("-h", "--help"):
            print(usage())
            sys.exit(0)

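    # Two positional arguments specify the players; otherwise fall back to defaults.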
    if len(args) == 2:
        p1_pstyle, p1_hsize = ap.parse_args(args[0])
        p2_pstyle, p2_hsize = ap.parse_args(args[1])

        p_1 = Player(play_style=p1_pstyle, hist=p1_hsize)
        p_2 = Player(play_style=p2_pstyle, hist=p2_hsize)
    else:
        p_1 = Player(play_style=_HIST, hist=2)
        p_2 = Player(play_style=_SEQ)

    games = MultipleGames(p_1,
                          p_2,
                          num_games=ngames,
                          print_games=printg,
                          plot=plot)

    games.arrange_tournament()
Example #5
def main(argv=None):
    opt = parse_args(argv)
    dataset = datasets.TCGADataset()
    dataset.df = dataset.df - dataset.df.mean(axis=0)

    gene_graph = GeneManiaGraph()
    search_num_genes = [50, 100, 200, 300, 500, 1000, 2000, 4000, 8000, 16300]
    test_size = 300
    cuda = torch.cuda.is_available()
    exp = []
    for num_genes in search_num_genes:
        start_time = time.time()
        gene = "RPL4"
        model = GCN(cuda=cuda,
                    dropout=opt.dropout,
                    num_layer=opt.num_layer,
                    channels=opt.channels,
                    embedding=opt.embedding,
                    aggregation=opt.aggregation,
                    lr=opt.lr,
                    agg_reduce=opt.agg_reduce)
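        # Binary label: 1 when the target gene's (mean-centered) expression is positive.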
        dataset.labels = dataset.df[gene].where(
            dataset.df[gene] > 0).notnull().astype("int")
        dataset.labels = dataset.labels.values if isinstance(
            dataset.labels, pd.Series) else dataset.labels
        X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
            dataset.df,
            dataset.labels,
            stratify=dataset.labels,
            train_size=opt.train_size,
            test_size=opt.test_size,
            random_state=opt.seed)
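        # The largest search value (16300) stands for the full GeneMania graph.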
        if num_genes == 16300:
            neighbors = gene_graph.nx_graph
        else:
            neighbors = gene_graph.bfs_sample_neighbors(gene, num_genes)

        X_train = X_train[list(neighbors.nodes)].copy()
        X_test = X_test[list(neighbors.nodes)].copy()
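        # Overwrite the label gene's column with a constant so it cannot leak the target.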
        X_train[gene] = 1
        X_test[gene] = 1
        adj = sparse.csr_matrix(nx.to_numpy_matrix(neighbors))
        model.fit(X_train, y_train, adj=adj)

        y_hat = model.predict(X_test)
        y_hat = np.argmax(y_hat, axis=1)
        auc = sklearn.metrics.roc_auc_score(y_test,
                                            np.asarray(y_hat).flatten())
        del model
        exp.append(auc)
    report_results([{
        "name": "auc",
        "type": "objective",
        "value": np.array(exp).mean()
    }])
Example #6
def main():
    args = parse_args()
    transcribe = Transcriber()
    chunker = ChunkSpeaker()
    converter = Converter()
    try:
        if args.mode == 'transcribe':
            transcribe.transcribe(args.input_folder, args.output_folder)
        elif args.mode == 'chunk_speaker':
            chunker.chunk(args.audio_input_path, args.speech_segmentation_path,
                          args.output_folder)
        elif args.mode == 'convert':
            converter.convert(args.type, args.online_folder,
                              args.chunks_text_path, args.output_folder)

    except InputError as e:
        print(f'{InputError.__name__}:\n\t{e}')
Example #7
def main():
    os.chdir(os.path.expanduser('~'))
    if not os.path.exists('./.tikup'):
        os.mkdir('./.tikup')
    os.chdir('./.tikup')

    args = parse_args()
    username = args.user
    delete = args.no_delete
    limit = args.limit
    archive = args.use_download_archive

    downloadType = ''
    if archive:
        # Create the archive file if it does not already exist, then open it for read/write.
        if not os.path.exists('archive.txt'):
            open('archive.txt', 'x').close()
        file = open('archive.txt', 'r+')
    else:
        file = None
    did = str(random.randint(10000, 999999999))
    if args.hashtag:  # Download hashtag
        downloadType = 'hashtag'
        tiktoks = getHashtagVideos(username, limit)
    elif args.id:  # Download user ID
        downloadType = 'id'
        tiktoks = [username]
    elif args.liked:  # Download liked
        downloadType = 'liked'
        tiktoks = getLikedVideos(username, limit)
    else:  # Download username
        downloadType = 'username'
        tiktoks = getUsernameVideos(username, limit)
    tiktoks = downloadTikToks(username, tiktoks, file, downloadType, did)
    if args.no_upload:  # Upload to the Internet Archive; despite its name, this flag is truthy when uploading is enabled
        uploadTikToks(tiktoks, file, delete)

    if file is not None:
        file.close()
    print('')
Example #8
def main():
    args = parse_args(__doc__, ['env', 'mode', 'timesteps'])

    env = gym.make(args.env)
    env.render(mode=args.mode)
    obs = env.reset()
    env.unwrapped.set_gravity(-1)

    # TODO: use controller arg
    ctrl = leanLeftRightController(env)

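    # Roll out the controller, resetting whenever an episode terminates.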
    for _ in range(args.timesteps):
        obs, _, done, _ = env.step(ctrl.get_action(obs))
        if done:
            print(
                '\n=============\nThe episode has finished, restarting\n=============\n'
            )
            time.sleep(1)
            obs = env.reset()
            ctrl.reset()
Example #9
def main():
    args = argparser.parse_args()

    file_path = args.file_path
    buffer_size = args.buffer_size
    comparator = args.comparator

    if file_path is None:
        file_path = input('Input file path: ')

    if buffer_size is None:
        buffer_size = int(input('Input buffer size: '))

    if comparator is None:
        comparator = input('Input comparator: ')

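    # Start from a clean scratch directory for the temporary files.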
    if os.path.exists('temp'):
        shutil.rmtree('temp')
    os.mkdir('temp')

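    # Write buffer-sized sorted runs to temp files, then merge them into the final order.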
    make_temp_files(file_path, buffer_size, COMPARATORS[comparator], args.r)

    sort(COMPARATORS[comparator], args.r)
Example #10
def main():
    args = argparser.parse_args()

    try:
        page_id = get_page_id(args.url)

    except (RuntimeError, requests.exceptions.ConnectionError) as error:
        print("{}: runtime error: {}".format(sys.argv[0], error), file=sys.stderr)
        sys.exit(1)

    print("Downloading posts. This may take some time, be patient...")

    postsReceiver = PostsReceiver(page_id=page_id)
    try:
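        # Use the parallel receiver unless exactly one worker was requested.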
        if not args.workers or args.workers > 1:
            posts = postsReceiver.parallel_receive(max_workers=args.workers)
        else:
            posts = postsReceiver.receive()
    except KeyboardInterrupt:
        msg = "{}: {}".format(sys.argv[0], "Keyboard interrupt. Exiting...")
        print(msg, file=sys.stderr)
        sys.exit(1)
    except VKApiError as error:
        print("{}: vk api error: {}".format(sys.argv[0], error), file=sys.stderr)
        sys.exit(1)
    except Exception as error:
        msg = "{}: catastrophic error: {}".format(sys.argv[0], error)
        print(msg, file=sys.stderr)
        sys.exit(1)

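    # Rank posts by reposts or likes, in descending order.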
    if args.reposts:
        posts = sorted(posts, key=lambda x: -x.reposts)
    else:
        posts = sorted(posts, key=lambda x: -x.likes)

    pretty_print(posts[: args.top], page_id, args.reposts)
Example #11
#!/usr/bin/env python
# Generative Adversarial Networks (GAN) example with 2D samples in PyTorch.
import os
from skimage import io
import torch
import torch.nn as nn
from torch.autograd import Variable
from sampler import generate_lut, sample_2d
from visualizer import GANDemoVisualizer
from argparser import parse_args
from networks import SimpleMLP

DIMENSION = 2

args = parse_args()
cuda = not args.cpu
bs = args.batch_size
z_dim = args.z_dim

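# The grayscale input image defines the target 2D density to sample from.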
density_img = io.imread(args.input_path, True)
lut_2d = generate_lut(density_img)

visualizer = GANDemoVisualizer('GAN 2D Example Visualization of {}'.format(args.input_path))

generator = SimpleMLP(input_size=z_dim, hidden_size=args.g_hidden_size, output_size=DIMENSION)
discriminator = SimpleMLP(input_size=DIMENSION, hidden_size=args.d_hidden_size, output_size=1)

if cuda:
    generator.cuda()
    discriminator.cuda()
criterion = nn.BCELoss()
Example #12
def main():
    """ Mostly inspired from 
        https://github.com/totti0223/gradcamplusplus/blob/master/gradcamutils.py#L10 """
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # Silence TensorFlow logging
    args = parse_args()
    logger = get_logger(args.verbose)

    logger.info(
        "Loading the input image and resizing it to (224, 224) as required by the model."
    )
    image = np.array(load_img(args.input, target_size=(224, 224)),
                     dtype=np.uint8)

    logger.info(
        "Pre-processing the image the way ImageNet images were when VGG was trained "
        "(subtracting the channel means) and adding the batch dimension (a single image)")
    image_processed = preprocess_input(np.expand_dims(image, axis=0))

    logger.info("Instantiate the pre-trained VGG19")
    model = VGG19(include_top=True, input_shape=(224, 224, 3))

    logger.info("Gets which class is the most activated")
    prediction = model.predict(image_processed)
    predicted_class = np.argmax(prediction)
    predicted_class_name = decode_predictions(prediction, top=1)[0][0][1]

    logger.info(
        "Getting the tensor (a scalar here) that is activated when showing the image"
    )
    y_c_tensor = model.output[0, predicted_class]

    logger.info(
        "Getting the tensor corresponding to the output of the studied convolution layer"
    )
    A_tensor = model.get_layer(args.layer).output

    logger.info("Gets the tensor containing the gradient of y_c w.r.t. A")
    gradient_tensor = K.gradients(y_c_tensor, A_tensor)[0]

    logger.info(
        "Creating a function that takes the model's input "
        "and outputs the convolution's result and the gradient of the prediction w.r.t. it"
    )
    run_graph = K.function([model.input], [A_tensor, gradient_tensor])

    logger.info("Runs the graph using the inputted image")
    A, gradient = run_graph([image_processed])
    A, gradient = A[0], gradient[0]  # Keep the first (and only) batch element

    logger.info("Performs global average pooling onto the activation gradient")
    alpha_c = np.mean(gradient, axis=(0, 1))

    logger.info("Weighs the filters maps with the activation coefficient")
    L_c = np.dot(A, alpha_c)

    logger.info("Resizes the localisation map to match the input image's size")
    L_c = zoom(L_c, 224 / L_c.shape[0])

    logger.info("Plots the original image and the superimposed heat map")
    plt.subplots(nrows=1, ncols=2, dpi=160, figsize=(7, 4))
    plt.subplots_adjust(left=0.01,
                        bottom=0.0,
                        right=0.99,
                        top=0.96,
                        wspace=0.11,
                        hspace=0.2)
    plt.subplot(121)
    plt.title("Original image")
    plt.imshow(image)
    plt.axis("off")
    plt.subplot(122)
    plt.title("{}th dimension ({}) \nw.r.t layer {}".format(
        predicted_class, predicted_class_name, args.layer))
    plt.imshow(image)
    plt.imshow(L_c, alpha=0.5, cmap="jet")
    plt.axis("off")

    if args.output is not None:
        logger.info("Saves the figure under {}".format(args.output))
        plt.savefig(args.output, dpi=300)

    if not args.quiet:
        plt.show()
Example #13
def doWork():
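    # Forward the parsed positional and keyword arguments straight to kmeans().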
    args, kwargs = argparser.parse_args()
    kmeans(*args, **kwargs)
Example #14
def main():
    nb_total_steps = 1000
    nb_iterations = 40
    hidden_layers = [256, 256]
    writer = tensorboardX.SummaryWriter()

    args = parse_args(__doc__, ['env'])

    env = gym.make(args.env)

    ctrl = rand_ctrl = RandomController(env)

    print('#inputs : %d' % ctrl.nb_inputs())
    print('#actions: %d' % ctrl.nb_actions())

    # f_net = make_net(
    #     [ctrl.nb_inputs() + ctrl.nb_actions()] + hidden_layers + [ctrl.nb_inputs()],
    #     [nn.ReLU() for _ in hidden_layers],
    # )
    f_net = MOENetwork(
        nb_inputs=ctrl.nb_inputs() + ctrl.nb_actions(),
        nb_experts=4,
        gait_layers=[64],
        expert_layers=[64, ctrl.nb_inputs()],
    )

    data = collect_data(env, ctrl, nb_total_steps * 10)

    dynamics = DynamicsModel(env, f_net, data.get_all(), writer=writer)
    # cost_func = lambda s, a, sn: -sn[3].item()  # refers to vx
    cost_func = get_cost(args.env)

    # data.calc_normalizations()
    # dynamics.fit(data)

    mpc_ctrl = MPCcontroller(env, dynamics.predict, cost_func, num_simulated_paths=100, horizon=10, num_mpc_steps=10)
    eval_args = EvaluationArgs(nb_burnin_steps=4, nb_episodes=10, horizons=[1, 2, 4, 8, 16, 32])

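    # Refit the dynamics model each iteration, picking the data-collection controller at random.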
    for i in range(nb_iterations):
        print('Iteration', i)
        new_data = collect_data(env, ctrl, nb_total_steps)
        dynamics.fit(*new_data.get_all())
        data.extend(new_data)
        dynamics.fit(*data.sample(sample_size=4*nb_total_steps))
        evaluate_and_log_dynamics(
            dynamics.predict, env, rand_ctrl, writer=writer, i_step=i, args=eval_args
        )
        evaluate_and_log_dynamics(
            dynamics.predict, env, mpc_ctrl, writer=writer, i_step=i, args=eval_args
        )
        # dynamics.fit(*data.get_all())
        if random.random() > 0.5:
            ctrl = rand_ctrl
        else:
            ctrl = mpc_ctrl

    env = gym.make(args.env)

    ctrl = MPCcontroller(env, dynamics.predict, cost_func, num_simulated_paths=1000, num_mpc_steps=4)

    # TODO

    env.render(mode='human')
    obs = env.reset()

    for _ in range(100):
        # time.sleep(1. / 60.)
        obs, r, done, _ = env.step(ctrl.get_action(obs))
        # print('  ', cost_func(obs))
        if done:
            print("done:", r, obs)
            time.sleep(1)
            ctrl.reset()
            obs = env.reset()
    ipdb.set_trace()  # drop into the debugger for post-run inspection
Example #15
def main():
    args = parse_args()

    # __________________ Params ___________________
    sample_size = args.sample_size
    train_batch_size = args.train_batch_size
    test_batch_size = 128
    num_epochs = args.num_epochs
    num_workers = args.num_worker
    lr = args.learning_rate
    start_epoch = 0
    # _____________________________________________

    manual_seed = random.randint(1, 10000)
    random.seed(manual_seed)
    torch.manual_seed(manual_seed)
    torch.cuda.manual_seed_all(manual_seed)
    torch.set_num_threads(num_workers + 1)
    cudnn.benchmark = True
    # cudnn.deterministic = False
    cudnn.enabled = True

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    coco_train = Coco(
        'datasets/data/coco_data/person_keypoints_train2017.json', 'train',
        sample_size, transforms.Compose([normalize]))
    coco_val = Coco('datasets/data/coco_data/person_keypoints_val2017.json',
                    'train', sample_size, transforms.Compose([normalize]))

    train_loader = DataLoader(coco_train,
                              batch_size=train_batch_size,
                              shuffle=True,
                              num_workers=num_workers,
                              pin_memory=False)
    val_loader = DataLoader(coco_val,
                            batch_size=test_batch_size,
                            shuffle=False,
                            num_workers=num_workers,
                            pin_memory=False)

    pose_net = bninception(out_chn=2)
    model = DataParallel(pose_net)
    model.cuda()

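    # Optionally resume from a checkpoint: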
    # checkpoint = torch.load('models/m_100.pth')
    # pretrained_dict = checkpoint['state_dict']
    # model.load_state_dict(pretrained_dict)

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    # os.makedirs(args.log, exist_ok=True)

    for epoch in range(start_epoch, num_epochs):
        if epoch == 100:
            for param_group in optimizer.param_groups:
                param_group['lr'] = 1e-4

        dloss, scale_loss = train(train_loader, model, optimizer)
        tloss = f'det_loss {dloss} scale loss {scale_loss}'

        dloss, scale_loss = train_test(val_loader, model)
        test_loss = f'det_loss {dloss} scale loss {scale_loss}'

        with open('losses/train_loss_384.txt', 'a') as the_file:
            the_file.write(tloss + '\n')

        with open('losses/test_loss_384.txt', 'a') as the_file:
            the_file.write(test_loss + '\n')

        ckpt = {
            'epoch': epoch,
            'optimizer': optimizer.state_dict(),
            'state_dict': model.state_dict()
        }

        # os.makedirs(args.model_save_path, exist_ok=True)
        # ckpt_name = os.path.join(args.model_save_path, 'epoch_%d.ckpt' % epoch)
        torch.save(ckpt, f'models/m_384_{epoch}.pth')