Code Example #1
File: main.py  Project: zainkhan12345/todo-cli
import os
import time
from datetime import datetime


def loadAdd(todo):
    os.system('clear')
    name = ""
    description = ""
    date = ""
    # keep prompting until a non-empty task name is entered
    while name == "":
        print("Enter the name of the task")
        name = input()
        if name == "":
            print("Task name can't be empty")
    description = input(
        "Enter the Description of the task (Optional, Default: Empty): ")
    # keep prompting until the date is valid (an empty string is accepted
    # and replaced with today's date below)
    while True:
        date = input(
            "Enter the date of the task (Optional, Default: Today's Date): ")
        date = validate(date)
        if date is not None:
            break
        print("Date entered in wrong format, please enter it as dd-mm-yyyy")
    # accept only an integer priority from 1 to 10; empty keeps the default
    priority = ""
    while not priority.isnumeric() or not (1 <= int(priority) <= 10):
        priority = input(
            "Enter the priority of the task (Optional, Default: 1): ")
        if priority == "":
            break
    if date == "":
        # %Y gives a four-digit year, matching the dd-mm-yyyy format
        date = datetime.today().strftime("%d-%m-%Y")
    if todo.tasks.get(date) is None:
        todo.tasks[date] = []
    todo.createTask(name, description, priority, date)
    print("Task added successfully")
    time.sleep(1)
Code Example #2
File: main.py  Project: zainkhan12345/todo-cli
def login(todo):
    os.system("clear")
    # keep prompting until a valid date is entered; after the dashboard
    # returns, the user is asked to log in again
    while True:
        print("Date to Login (Format dd-mm-yyyy): ")
        date = input()
        date = validate(date)
        if date is not None:
            todo.setDate(date)
            loadDashboardTodo(todo)
        else:
            print("Date format is not correct")
Code Example #3
import datetime

import torch
from torch import nn

# Excerpt: assumes the rnn/gru/lstm models, the train/validate/test helpers,
# the data splits (x_tr/y_tr, x_va/y_va, x_te/y_te), the rnn_loss/gru_loss
# lists, model_selector_rnn/gru, patience, batch_size, n_epochs, and
# time_start are defined earlier in the script.
model_selector_lstm = Early_Stopper(patience)
criterion = nn.MSELoss()
optimizer_rnn = torch.optim.Adam(rnn.parameters())
optimizer_gru = torch.optim.Adam(gru.parameters())
optimizer_lstm = torch.optim.Adam(lstm.parameters())

print('Training started at:', time_start)

# train each network until its own early stopper clears keep_training
while (model_selector_rnn.keep_training or model_selector_gru.keep_training
       or model_selector_lstm.keep_training):

    if model_selector_rnn.keep_training:
        rnn_loss.append([
            train(x_tr, y_tr, batch_size, optimizer_rnn, criterion, rnn,
                  False),
            validate(x_va, y_va, batch_size, criterion, rnn, False),
            test(x_te, y_te, batch_size, criterion, rnn, False)
        ])

        rnn_time = str(datetime.datetime.now() - time_start)
        model_selector_rnn.update(rnn_loss[-1][1], n_epochs)

    if model_selector_gru.keep_training:
        gru_loss.append([
            train(x_tr, y_tr, batch_size, optimizer_gru, criterion, gru,
                  False),
            validate(x_va, y_va, batch_size, criterion, gru, False),
            test(x_te, y_te, batch_size, criterion, gru, False)
        ])

        gru_time = str(datetime.datetime.now() - time_start)
        model_selector_gru.update(gru_loss[-1][1], n_epochs)
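
Early_Stopper itself is not shown in this excerpt. A minimal sketch of the interface the loop relies on (a keep_training flag plus an update(validation_loss, epoch) method that clears the flag after patience epochs without improvement) could look like this; the project's real class may differ:

class Early_Stopper:
    # Hypothetical sketch; only the interface used above is guaranteed.
    def __init__(self, patience):
        self.patience = patience
        self.best_loss = float('inf')
        self.stale_epochs = 0
        self.keep_training = True

    def update(self, validation_loss, epoch):
        # epoch is accepted for logging parity but not needed for the logic
        if validation_loss < self.best_loss:
            self.best_loss = validation_loss
            self.stale_epochs = 0
        else:
            self.stale_epochs += 1
            if self.stale_epochs >= self.patience:
                self.keep_training = False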
Code Example #4
#                                 args.cuda)])
#
#            gru_time = str(datetime.datetime.now()-time_start)
#            model_selector_gru.update(gru_loss[-1][1], n_epochs)

        if model_selector_lstm.keep_training:
            lstm_loss.append([
                train(x_tr, y_tr, batch_size, optimizer_lstm, criterion, lstm,
                      args.cuda),
                validate(x_va, y_va, batch_size, criterion, lstm, args.cuda),
                test(x_te, y_te, batch_size, criterion, lstm, args.cuda)
            ])

            lstm_time = str(datetime.datetime.now() - time_start)
            model_selector_lstm.update(lstm_loss[-1][1], n_epochs)

        n_epochs += 1

#        s1 = pandas.Series([n_epochs, rnn_loss[-1][0], rnn_loss[-1][1],
Code Example #5
File: cli.py  Project: SeMorgana/address-book
def main():
    # Python 2 code: uses print statements and raw_input()
    while True:
        print "\nSelect an action, then hit enter:\
               \n1-Creat a new book\
               \n2-Open a book\
               \n9-quit"

        inp = raw_input().strip()
        if not inp.isdigit():
            print "Invalid input!"
            continue
        choice = int(inp)
        if choice == 1 or choice == 2:
            b = None
            if choice == 1:
                b = Book()
            elif choice == 2:
                try:
                    path = raw_input("book path:").strip()
                    b = Book(path)
                except IOError:
                    sys.stderr.write("\nFile '%s' cannot be read!\n" % path)
                    continue
            while True:
                print "\nSelect an action for the book: \
                    \n1-add entry\
                    \n2-print entry\
                    \n3-save as\
                    \n4-sort by(fname, lname or zip_code)\
                    \n5-delete an entry(by index based on print)\
                    \n6-edit an entry\
                    \n9-return to upper level"

                inp2 = raw_input().strip()
                if not inp2.isdigit():
                    print "Invalid input!"
                    continue
                choice2 = int(inp2)
                if choice2 == 1:
                    fname = raw_input("firstname:").strip()
                    lname = raw_input("lastname:").strip()
                    b.add_entry(Entry(fname, lname))
                elif choice2 == 2:
                    b.show_entry()
                elif choice2 == 3:
                    path = raw_input("file name:").strip()
                    b.save_as(path)
                elif choice2 == 4:
                    attr = raw_input(("input '%s', '%s' or '%s':") % (FNAME, LNAME, ZIP_CODE)).strip()
                    b.sort(attr)
                elif choice2 == 5:
                    l = len(b)
                    if l == 0:
                        print "Nothing left!"
                        continue
                    index = raw_input("input 0-" + str(l - 1) + ":").strip()
                    if not index.isdigit() or int(index) < 0 or int(index) > l - 1:
                        print "Invalid input!"
                        continue
                    b.delete_entry(int(index))
                elif choice2 == 6:
                    l = len(b)
                    if l == 0:
                        print "Nothing to edit!"
                        continue
                    index = raw_input("select an entry to edit (input 0-" + str(l - 1) + "):").strip()
                    if not index.isdigit() or int(index) < 0 or int(index) > l - 1:
                        print "Invalid input!"
                        continue
                    attr = raw_input(
                        ("select an attribute to edit '%s', '%s', '%s', '%s', '%s', '%s', '%s' or '%s':")
                        % (FNAME, LNAME, ADDR, CITY, STATE, ZIP_CODE, PHONE_NUM, EMAIL)
                    ).strip()
                    value = raw_input("input an value for %s:" % attr).strip()
                    if utility.validate(attr, value):
                        b.edit_entry(int(index), attr, value)
                    else:
                        print ("\nInvalid value for %s!") % attr
                elif choice2 == 9:
                    break
                else:
                    print "Unimplemented!"
        elif choice == 9:
            break
        else:
            print "Unimplemented!"
Code Example #6
def main_worker(gpu, ngpus_per_node, args):
    global best_prec1, sample_size
    args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
        print("Current Device is ", torch.cuda.get_device_name(0))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)

    # create model2:
    # define save_folder up front so it is available for checkpoint and
    # model naming below regardless of which branch runs
    save_folder = "{}/Model/{}{}".format(args.ROOT, args.model,
                                         args.model_depth)
    if args.pretrained:
        print("=> Model (date_diff): using pre-trained model '{}_{}'".format(
            args.model, args.model_depth))
        pretrained_model = models.__dict__[args.arch](pretrained=True)
    elif args.model_type == 2:
        print("=> Model (date_diff regression): creating model '{}_{}'".
              format(args.model, args.model_depth))
        pretrained_model = generate_model(args)  # good for resnet

    model = longi_models.ResNet_interval(pretrained_model,
                                         args.num_date_diff_classes,
                                         args.num_reg_labels)

    criterion0 = torch.nn.CrossEntropyLoss().cuda(args.gpu)  # for STO loss
    criterion1 = torch.nn.CrossEntropyLoss().cuda(args.gpu)  # for RISI loss

    criterion = [criterion0, criterion1]
    start_epoch = 0

    optimizer = torch.optim.Adam(model.parameters(),
                                 args.lr,
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=0,
                                 amsgrad=False)

    # all models optionally resume from a checkpoint
    if args.resume_all:
        if os.path.isfile(args.resume_all):
            print("=> Model_all: loading checkpoint '{}'".format(
                args.resume_all))
            checkpoint = torch.load(args.resume_all,
                                    map_location=lambda storage, loc: storage)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            for state in optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.cuda()
            start_epoch = checkpoint['epoch']
            print("=> Model_all: loaded checkpoint '{}' (epoch {})".format(
                args.resume_all, checkpoint['epoch']))

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    print("batch-size = ", args.batch_size)
    print("epochs = ", args.epochs)
    print("range-weight (weight of range loss) = ", args.range_weight)
    cudnn.benchmark = True
    print(model)

    # Data loading code
    traingroup = ["train"]
    evalgroup = ["eval"]
    testgroup = ["test"]

    train_augment = ['normalize', 'flip', 'crop']  # 'rotate',
    test_augment = ['normalize', 'crop']
    eval_augment = ['normalize', 'crop']

    train_stages = args.train_stages.strip('[]').split(', ')
    test_stages = args.test_stages.strip('[]').split(', ')
    eval_stages = args.eval_stages.strip('[]').split(', ')
    #############################################################################
    # test-retest analysis

    trt_stages = args.trt_stages.strip('[]').split(', ')

    model_pair = longi_models.ResNet_pair(model.modelA,
                                          args.num_date_diff_classes)
    torch.cuda.set_device(args.gpu)
    model_pair = model_pair.cuda(args.gpu)

    if args.resume_all:
        # strip the 8-character checkpoint suffix (e.g. ".pth.tar")
        model_name = args.resume_all[:-8]

    else:
        model_name = save_folder + "_" + time.strftime("%Y-%m-%d_%H-%M")+ \
                     traingroup[0] + '_' + args.train_stages.strip('[]').replace(', ', '')

    data_name = args.datapath.split("/")[-1]

    log_name = (args.ROOT + "/log/" + args.model + str(args.model_depth) +
                "/" + data_name + "/" + time.strftime("%Y-%m-%d_%H-%M"))
    writer = SummaryWriter(log_name)

    trt_dataset = long.LongitudinalDataset3DPair(
        args.datapath, testgroup, args.datapath + "/test_retest_list.csv",
        trt_stages, test_augment, args.max_angle, args.rotate_prob,
        sample_size)

    trt_loader = torch.utils.data.DataLoader(trt_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=True,
                                             num_workers=args.workers,
                                             pin_memory=True)

    print("\nEvaluation on Test-Retest Set: ")

    util.validate_pair(trt_loader, model_pair, criterion,
                       model_name + "_test_retest", args.epochs, writer,
                       args.print_freq)

    ##########################################################################

    train_dataset = long.LongitudinalDataset3D(
        args.datapath,
        traingroup,
        args.datapath + "/train_list.csv",
        train_stages,
        train_augment,  # advanced transformation: add random rotation
        args.max_angle,
        args.rotate_prob,
        sample_size)

    eval_dataset = long.LongitudinalDataset3D(args.datapath, evalgroup,
                                              args.datapath + "/eval_list.csv",
                                              eval_stages, eval_augment,
                                              args.max_angle, args.rotate_prob,
                                              sample_size)

    test_dataset = long.LongitudinalDataset3D(args.datapath, testgroup,
                                              args.datapath + "/test_list.csv",
                                              test_stages, test_augment,
                                              args.max_angle, args.rotate_prob,
                                              sample_size)

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        # sampler = train_sampler,
        num_workers=args.workers,
        pin_memory=True)

    eval_loader = torch.utils.data.DataLoader(eval_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=args.workers,
                                              pin_memory=True)

    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=args.workers,
                                              pin_memory=True)

    # data_name, model_name, and the TensorBoard writer are already set up above

    # Use a tool at comet.com to keep track of the parameters used;
    # log the model name, loss, and optimizer as well
    hyper_params["loss"] = criterion
    hyper_params["optimizer"] = optimizer
    hyper_params["model_name"] = model_name
    hyper_params["save_folder"] = save_folder
    experiment.log_parameters(hyper_params)
    # End of using comet

    if args.evaluate:
        print("\nEVALUATE before starting training: ")
        util.validate(eval_loader,
                      model,
                      criterion,
                      model_name + "_eval",
                      writer=writer,
                      range_weight=args.range_weight)

    # training the model
    if start_epoch < args.epochs - 1:
        print("\nTRAIN: ")
        if args.early_stop:
            # create the stopper once so its patience counter persists
            # across evaluations
            early_stopping = util.EarlyStopping(patience=args.patience,
                                                tolerance=args.tolerance)
        for epoch in range(start_epoch, args.epochs):
            if args.distributed:
                train_sampler.set_epoch(epoch)
            util.adjust_learning_rate(optimizer, epoch, args.lr)

            # train for one epoch
            util.train(train_loader,
                       model,
                       criterion,
                       optimizer,
                       epoch,
                       sample_size,
                       args.print_freq,
                       writer,
                       range_weight=args.range_weight)

            # evaluate on validation set
            if epoch % args.eval_freq == 0:
                csv_name = model_name + "_eval.csv"
                if os.path.isfile(csv_name):
                    os.remove(csv_name)
                prec = util.validate(eval_loader,
                                     model,
                                     criterion,
                                     model_name + "_eval",
                                     epoch,
                                     writer,
                                     range_weight=args.range_weight)

                if args.early_stop:
                    early_stopping(
                        {
                            'epoch': epoch + 1,
                            'arch1': args.arch1,
                            'arch2': args.model2 + str(args.model2_depth),
                            'state_dict': model.state_dict(),
                            'optimizer': optimizer.state_dict(),
                        }, prec, model_name)

                    print("=" * 50)

                    if early_stopping.early_stop:
                        print("Early stopping at epoch {}.".format(epoch))
                        break

                else:
                    # remember best prec@1 and save checkpoint
                    is_best = prec > best_prec1
                    best_prec1 = max(prec, best_prec1)
                    util.save_checkpoint(
                        {
                            'epoch': epoch + 1,
                            'arch': args.model + str(args.model_depth),
                            'state_dict': model.state_dict(),
                            'best_prec1': best_prec1,
                            'optimizer': optimizer.state_dict(),
                        }, is_best, model_name)

    if args.test:
        print("\nTEST: ")
        util.validate(test_loader,
                      model,
                      criterion,
                      model_name + "_test",
                      args.epochs,
                      writer,
                      range_weight=args.range_weight)

        print("\nEvaluation on Train Set: ")
        util.validate(train_loader,
                      model,
                      criterion,
                      model_name + "_train",
                      args.epochs,
                      writer,
                      range_weight=args.range_weight)

    #############################################################################################################

    # test on only the basic sub-network (STO loss)
    model_pair = longi_models.ResNet_pair(model.modelA,
                                          args.num_date_diff_classes)
    torch.cuda.set_device(args.gpu)
    model_pair = model_pair.cuda(args.gpu)

    if args.test_pair:

        train_pair_dataset = long.LongitudinalDataset3DPair(
            args.datapath, traingroup, args.datapath + "/train_pair_list.csv",
            train_stages, test_augment, args.max_angle, args.rotate_prob,
            sample_size)

        train_pair_loader = torch.utils.data.DataLoader(
            train_pair_dataset,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.workers,
            pin_memory=True)

        print("\nEvaluation on Train Pair Set: ")

        util.validate_pair(train_pair_loader, model_pair, criterion,
                           model_name + "_train_pair_update", args.epochs,
                           writer, args.print_freq)

        test_pair_dataset = long.LongitudinalDataset3DPair(
            args.datapath, testgroup, args.datapath + "/test_pair_list.csv",
            test_stages, test_augment, args.max_angle, args.rotate_prob,
            sample_size)

        test_pair_loader = torch.utils.data.DataLoader(
            test_pair_dataset,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.workers,
            pin_memory=True)

        print("\nEvaluation on Test Pair Set: ")

        util.validate_pair(test_pair_loader, model_pair, criterion,
                           model_name + "_test_pair_update", args.epochs,
                           writer, args.print_freq)

    writer.export_scalars_to_json("./all_scalars.json")
    writer.close()
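
util.EarlyStopping is used in the training loop above but not shown. A minimal sketch of the interface the loop depends on (constructed with patience and tolerance, called with a checkpoint dict, a validation metric, and a model name, and exposing an early_stop flag) might be the following; the checkpoint file naming is an assumption:

import torch


class EarlyStopping:
    # Hypothetical sketch; the project's real util.EarlyStopping may differ.
    def __init__(self, patience, tolerance=0.0):
        self.patience = patience
        self.tolerance = tolerance
        self.best_metric = None
        self.stale_evals = 0
        self.early_stop = False

    def __call__(self, checkpoint, metric, model_name):
        if self.best_metric is None or metric > self.best_metric + self.tolerance:
            # improvement: reset the counter and keep the best checkpoint
            self.best_metric = metric
            self.stale_evals = 0
            torch.save(checkpoint, model_name + "_best.pth.tar")  # assumed name
        else:
            self.stale_evals += 1
            if self.stale_evals >= self.patience:
                self.early_stop = True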
Code Example #7
def update_data_matrix(self, tableWidgetItem):
    lower_bound = 0.0
    upper_bound = float("inf")
    value = 0.0
    if tableWidgetItem.column() == 0:
        element = "length"
        value = utility.validate(tableWidgetItem.text(),
                                 lower_bound,
                                 upper_bound,
                                 l_inclusive=False,
                                 u_inclusive=False)
    elif tableWidgetItem.column() == 1:
        element = "separation"
        lower_bound = -float("inf")
        value = utility.validate(tableWidgetItem.text(),
                                 lower_bound,
                                 upper_bound,
                                 l_inclusive=False,
                                 u_inclusive=False)
    elif tableWidgetItem.column() == 2:
        element = "earth impedance"
        try:
            # explicit string conversion required for Python 2.7
            value = complex(str(tableWidgetItem.text()))
        except ValueError:
            value = False
    elif tableWidgetItem.column() == 3:
        element = "soil resistivity"
        value = utility.validate(tableWidgetItem.text(),
                                 lower_bound,
                                 upper_bound,
                                 l_inclusive=False,
                                 u_inclusive=False)
    # validate() returns False on failure, so test identity rather than
    # truthiness (a legitimate value of 0.0 is falsy)
    if value is not False:
        columns = [0, 1, 2, 4, 5]
        column = columns[tableWidgetItem.column()]
        update_mapping = (globals.sections[tableWidgetItem.row(), column]
                          != value)
        if isinstance(value, complex):
            # store real and imaginary parts in adjacent columns
            globals.sections[tableWidgetItem.row(), column] = np.real(value)
            globals.sections[tableWidgetItem.row(),
                             column + 1] = np.imag(value)
            if np.imag(value) == 0.0:
                tableWidgetItem.setText(str(np.real(value)))
            else:
                tableWidgetItem.setText(str(value))
        else:
            globals.sections[tableWidgetItem.row(), column] = value
            #tableWidgetItem.setText(str(value))
        if update_mapping:
            self.main_window.refresh_mapping()
    else:
        self.main_window.show_status_message(
            "Section " + str(tableWidgetItem.row() + 1) + " " + element +
            ": Input value '" + tableWidgetItem.text() +
            "' out of bounds. (" + str(lower_bound) + " to " +
            str(upper_bound) + "). Value not set.",
            error=True,
            beep=True)
        # disconnect while restoring the old value so the itemChanged
        # signal does not re-enter this handler
        self.tableWidget.itemChanged.disconnect()
        self.refresh_data()
        self.tableWidget.itemChanged.connect(self.update_data_matrix)
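
The utility.validate call used throughout this handler is not shown either. A minimal sketch consistent with its call sites (parse the text as a float, test it against the bounds with the requested inclusivity, and return the parsed number, or False on failure) could be the following; the real implementation may differ:

def validate(text, lower_bound, upper_bound,
             l_inclusive=True, u_inclusive=True):
    # Hypothetical sketch; returns the parsed float or False, matching the
    # "if value is not False" test in the caller.
    try:
        value = float(str(text))
    except ValueError:
        return False
    above = value >= lower_bound if l_inclusive else value > lower_bound
    below = value <= upper_bound if u_inclusive else value < upper_bound
    return value if (above and below) else False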