コード例 #1
0
def main():
    """Drive the interactive peptide demo.

    Times a brute-force search, a (repeatable) scoring search, and a
    branch-and-bound search for gene sequences coding the requested
    peptide, then prints a walltime recap.
    """
    args = vars(set_args())
    clear_screen()
    print("Welcome to", sys.argv[0])
    print("The peptide we're using today is", args['peptide'])
    input("Press <enter> to find all gene sequence that could code for this peptide using a brute force algorithm...")
    brute_elapsed = brute_time(args['peptide'], translation)
    input("Press <enter>")
    # The scoring demo may be rerun as often as the user likes.
    while True:
        clear_screen()
        input("Now, let's look at a scoring algorithm, press <enter> to start this...")
        score_elapsed = score_time(args['peptide'], translation)
        rep = input("Would you like to repeat the scoring algorithm? If so, type 'yes' without the quotes: ")
        if rep != 'yes':
            break
    clear_screen()
    input("Finally, let's look at a branch-and-bound algorithm, press <enter> to start...")
    branch_elapsed = branch_time(args['peptide'], translation)
    input("Press <enter>")
    clear_screen()
    print("To recap:")
    print("Our brute force algorithm took", brute_elapsed, "seconds of walltime")
    print("Our scoring algorithm took", score_elapsed, "seconds of walltime")
    print("Our branch-and-bound algorithm took", branch_elapsed, "seconds of walltime")
コード例 #2
0
ファイル: train.py プロジェクト: sharkmir1/itdl-project
def main(args):
    """Train ``Model`` on ``PaperDataset``.

    If ``args.ckpt`` is set, restores the weights and recovers the start
    epoch and learning rate from the checkpoint filename; otherwise
    records the command line in the save directory and starts at epoch 0.
    Saves a checkpoint every epoch and, when ``args.lrdecay`` is set,
    halves the learning rate whenever validation loss worsens.
    """
    if not os.path.isdir(args.save):
        # makedirs(exist_ok=True): also creates missing parent dirs,
        # where plain os.mkdir would raise FileNotFoundError.
        os.makedirs(args.save, exist_ok=True)

    dataset = PaperDataset(args)
    args = set_args(args, dataset)
    model = Model(args)

    print(f"Device: {args.device}")
    model = model.to(args.device)
    if args.ckpt:
        cpt = torch.load(args.ckpt)
        model.load_state_dict(cpt)
        # Checkpoint filenames look like "<epoch>.vloss-<loss>.lr-<lr>"
        # (see torch.save below); parse epoch and lr back out of the name.
        start_epoch = int(args.ckpt.split("/")[-1].split(".")[0]) + 1
        args.lr = float(args.ckpt.split("-")[-1])

        print(f"Model parameter restored to {args.ckpt}")
    else:
        with open(args.save + "/commandLineArgs.txt", 'w') as f:
            f.write("\n".join(sys.argv[1:]))
        start_epoch = 0

    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)

    # Early stopping / decay based on validation loss; +inf guarantees the
    # decay branch can never fire on the very first epoch.
    latest_loss = float("inf")

    for e in range(start_epoch, args.epochs):
        print("Epoch: ", e, "\tLearning Rate: ",
              optimizer.param_groups[0]['lr'])
        train(model, optimizer, dataset, args)
        valid_loss = evaluate(model, dataset, args)
        if args.lrwarm:
            update_lr(optimizer, args, e)

        print("Saving model")
        # NOTE: the "<epoch>.vloss-<loss>.lr-<lr>" format is load-bearing —
        # the restore branch above parses epoch and lr from this filename.
        torch.save(
            model.state_dict(),
            args.save + "/" + str(e) + ".vloss-" + str(valid_loss)[:8] +
            ".lr-" + str(optimizer.param_groups[0]['lr']))

        if valid_loss > latest_loss:
            if args.lrdecay:
                print("Learning rate decayed.")
                optimizer.param_groups[0]['lr'] *= 0.5
        latest_loss = valid_loss
コード例 #3
0

class SupporterMgr(DBApp):
    """CRUD helper around the single-column ``supporters`` table."""

    def __init__(self):
        DBApp.__init__(self, 'supporters')

    def _create_tables(self):
        # Idempotent: safe to run on every startup.
        self._sql('CREATE TABLE if not exists supporters (name text)', True)

    def list(self, _print=False):
        """Return all supporter rows; pretty-print them when ``_print``."""
        ret = self._sql('SELECT * from supporters')
        if _print: pprint(ret)
        return ret

    def add(self, name):
        # Parameterized query (same [query, params] form remove() already
        # uses) — the previous %-formatted INSERT was SQL-injectable.
        self._sql(['INSERT INTO supporters VALUES (?)', (name, )], True)

    def remove(self, name):
        self._sql(['DELETE FROM supporters WHERE name=?', (name, )], True)


if __name__ == '__main__':
    # CLI surface: --list (flag), --add NAME, --remove NAME.
    cli_spec = [
        ('--list', {'action': 'store_true'}),
        ('--add', {}),
        ('--remove', {}),
    ]
    args = set_args(cli_spec)
    manager = SupporterMgr()
    if args.list:
        manager.list(True)
    if args.add:
        manager.add(args.add)
    if args.remove:
        manager.remove(args.remove)
コード例 #4
0
        result.write(str(idx + 1) + '.\n')
        result.write("GOLD\n" +
                     gold.encode('ascii', 'ignore').decode('utf-8', 'ignore') +
                     '\n')
        result.write(
            "GEN\n" +
            beam.encode('ascii', 'ignore').decode('utf-8', 'ignore').lower() +
            '\n\n')

    return preds, golds


if __name__ == "__main__":
    args = parse_args()
    args.eval = True

    dataset = PaperDataset(args)
    args = set_args(args, dataset)
    model = Model(args)

    # map_location keeps tensors on CPU while loading; .to() moves the
    # whole model to the configured device afterwards.
    checkpoint = torch.load(args.save,
                            map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint)
    model = model.to(args.device)

    # Stash decoding configuration on the model for generation.
    model.args = args
    model.maxlen = args.maxlen
    vocab = dataset.OUTPUT.vocab.stoi
    model.starttok = vocab['<start>']
    model.endtok = vocab['<eos>']
    model.eostok = vocab['.']

    test(args, dataset, model)
    # evaluate(model, dataset, args)  # uncomment this and comment above in order to check loss and PPL
コード例 #5
0
ファイル: main.py プロジェクト: EthanHolleman/tea-seq
def main():
    """Entry point: parse CLI options and kick off library construction."""
    opts = set_args()
    make_libraries(opts.r, opts.n, opts.d, opts.f, opts.flash)
コード例 #6
0
ファイル: cmdline.py プロジェクト: stephan-cr/dude
def main(cargs):
    """Top-level command dispatcher for the dude experiment tool (Python 2).

    Steps:
      1. parse global options from ``cargs``;
      2. handle ``create`` (the only command needing no Dudefile);
      3. load a Dudefile into ``cfg`` -- either the file given via the
         ``--expfile``-style option, or the first default-named file found
         while walking up at most 10 parent directories (chdir'ing upward
         as it searches);
      4. build experiment filters and success/failed/pending selections
         and narrow the experiment list accordingly;
      5. dispatch on the command word in ``cargs[0]``.

    Exits the process directly on usage errors or unknown commands.
    """
    # folder from where dude is called
    cfolder = os.getcwd()

    # parse command line
    (options, cargs) = parser.parse_args(cargs)

    # check if a command has been given
    if cargs == []:
        parser.print_help()
        sys.exit()

    # create requires no Dudefile, so we deal with it right here
    if cargs[0] == "create":
        if len(cargs) < 2:
            expgen.create()
        else:
            expgen.create(cargs[1])
        sys.exit(0)

    # all other commands require a Dudefile, so we first load it (in "cfg")
    cfg = None

    # use a given dudefile in options
    if options.expfile != None:
        try:
            cfg = imp.load_source('', options.expfile)
        except IOError:
            print >> sys.stderr, 'ERROR: Loading', options.expfile, 'failed'
            parser.print_help()
            sys.exit(1)
    else:  # try default file names
        current = os.getcwd()
        max_folder = 10  # arbitrary number of parent directories
        i = 0
        # Walk upward: try each default filename in the current directory,
        # then chdir to the parent and retry, up to max_folder levels.
        while i < max_folder:
            for f in ['desc.py', 'dudefile', 'Dudefile', 'dudefile.py']:
                try:
                    # Announce only when found above the start directory
                    # (i > 0); load_source raises IOError for a missing
                    # file, which just advances to the next candidate name.
                    if os.path.exists(f) and i > 0:
                        print "Opening Dudefile: ", os.path.abspath(f)
                    cfg = imp.load_source('', f)
                    break
                except IOError:
                    pass
            if cfg != None:
                break
            else:
                i += 1
                parent, last = os.path.split(current)
                os.chdir(parent)
                current = parent

        if cfg == None:
            print >> sys.stderr, 'ERROR: no dudefile found'
            parser.print_help()
            sys.exit(1)

    # add to actual folder as root in cfg
    # (cwd may have changed during the upward search above)
    cfg.root = os.getcwd()

    # check if cfg can be used for core functions
    core.check_cfg(cfg)

    # check if cfg can be used for summaries
    summary.check_cfg(cfg)

    # parse arguments to module
    # NOTE: "args" here is dude's argument-helper module (args.parse /
    # args.set_args), not the parsed command-line options.
    if options.margs:
        margs = args.parse(";".join(options.margs))
        print "Passing arguments:", margs
        args.set_args(cfg, margs)

    if hasattr(cfg, 'dude_version') and cfg.dude_version >= 3:
        dimensions.update(cfg)

    # collect filters
    filters = []
    if options.filter and options.filter != []:
        for fi in options.filter:
            # each -f/--filter value may name several filters, comma-separated
            for f in fi.split(','):
                filters.append(cfg.filters[f])

    if options.filter_inline and options.filter_inline != []:
        filters += filt.create_inline_filter(cfg, options.filter_inline)

    if options.filter_path:
        current = os.getcwd()
        if current != cfolder:
            # this assumes Dudefile is in the root of the experiment folder
            os.chdir(cfolder)
            path = os.path.abspath(options.filter_path)
            os.chdir(current)
            path = os.path.relpath(path)  # get raw_output_dir/exp_... format
        else:
            path = options.filter_path

        filters += filt.filter_path(cfg, path)

    # get experiments
    experiments = core.get_experiments(cfg)

    # select the set of experiments to be considered (successful,
    # failed or pending); all three flags set, or none set, means
    # "consider everything" and skips the narrowing below
    if (options.success and options.failed and options.pending) or\
            not (options.success or options.failed or options.pending):
        pass
    else:
        failed, pending = core.get_failed_pending_exp(cfg, experiments)
        # expin collects explicitly requested statuses; expout the rest
        expin = []
        expout = []

        if options.failed:
            expin += failed
        else:
            expout += failed

        if options.pending:
            expin += pending
        else:
            expout += pending

        if options.success:
            # successful = everything minus the excluded failed/pending
            experiments = [exp for exp in experiments if exp not in expout]
        else:
            experiments = expin

    # apply filters
    if filters != []:
        experiments = filt.filter_experiments(cfg, filters, options.invert,
                                              False, experiments)

    # dispatch on the command word
    cmd = cargs[0]
    if cmd == 'run':
        if options.force:
            clean.clean_experiments(cfg, experiments)
        execute.run(cfg, experiments, options)
    elif cmd == 'run-once':
        # run-once executes exactly one experiment in an isolated folder
        assert len(experiments) == 1
        optpt = experiments[0]
        folder = "once"
        utils.checkFolder(folder)  # create if necessary
        if options.force:
            clean.clean_experiment(folder)
        execute.execute_isolated(cfg, optpt, folder, options.show_output)
    elif cmd == 'sum':
        summary.summarize(cfg, experiments, cargs[1:], options.backend,
                          options.ignore_status)
    elif cmd == 'list':
        for experiment in experiments:
            if options.dict:
                print "experiment:", experiment
            else:
                print core.get_folder(cfg, experiment)
    elif cmd == 'failed':
        failed = core.get_failed(cfg, experiments, False)
        for ffile in failed:
            print ffile
    elif cmd == 'missing':
        failed = core.get_failed(cfg, experiments, True)
        for exp in failed:
            print exp
    elif cmd == 'clean':
        if options.invalid:
            clean.clean_invalid_experiments(cfg, experiments)
        else:
            # TODO if no filter applied, ask if that's really what the
            # user wants.
            r = 'y'
            if options.filter == None and \
                    options.filter_inline == None:
                print "sure to wanna delete everything? [y/N]"
                r = utils.getch()  #raw_input("Skip, quit, or continue?
                #[s/q/c]")

            if r == 'y':
                clean.clean_experiments(cfg, experiments)
    elif cmd == 'visit':
        if len(cargs) < 2:
            print "Specify a bash command after visit"
            sys.exit(1)
        elif len(cargs) > 2:
            print "Surround multi-term bash commands with \"\"."
            print "e.g., \"%s\"" % ' '.join(cargs[1:])
            sys.exit(1)
        visit.visit_cmd_experiments(cfg, experiments, cargs[1])
    elif cmd == 'info':
        info.show_info(cfg, experiments)
    elif cmd == 'status':
        info.print_status(cfg, experiments)
    else:
        print >> sys.stderr, "ERROR: wrong command. %s" % cargs[0]
        parser.print_help()
コード例 #7
0
        average_acc = []
        for test_task in range(task + 1):
            test_images = mnist.test.images

            test_images = test_images[:, task_permutation[test_task]]
            acc = model.test(test_images[:1000], mnist.test.labels[:1000])
            acc = acc * 100
            average_acc.append(acc)
            if args.num_tasks_to_run == task + 1:
                last_performance.append(acc)
            # print("Testing, task: ", test_task + 1, " \tAccuracy: ", acc)
            logger.info("Testing, task: {}\tAccuracy: {}".format(
                test_task + 1, acc))
        logger.info("average accuracy: {}".format(np.mean(average_acc)))
    return last_performance


if __name__ == "__main__":

    args = set_args()

    # Log INFO and above both to the file named by the CLI and to the
    # console. Keep the module-level names "args" and "logger": they are
    # read elsewhere in this module.
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)

    file_handler = logging.FileHandler(args.log, mode="w")
    file_handler.setLevel(logging.INFO)
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    for h in (file_handler, console_handler):
        logger.addHandler(h)

    tf.app.run()