Example #1
def main():
    device = torch.device('cpu')  # use the CPU to avoid possible GPU failures
    torch.manual_seed(1234)
    transforms = T.Compose([T.ToTensor(), T.Normalize((0.5, ), (0.5, ))])
    dataset = MnistDataset('data', 'train', transforms)
    train_dataset, validation_dataset = torch.utils.data.random_split(
        dataset, [8000, 2000])
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=64,
                                  shuffle=True,
                                  num_workers=4)
    validation_dataloader = DataLoader(validation_dataset,
                                       batch_size=64,
                                       shuffle=False,
                                       num_workers=4)
    lr_list = [0.01, 0.003, 0.001, 0.0003, 0.0001, 0.00003]
    layer_list = [256, 512, 1024]
    func_list = ["relu", "sig", "tanh"]
    hidden_layer = [0, 1, 2]
    epochs = 12

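    # Grid search: every combination of depth, width, and learning rate, plus
    # the activation function when the network has at least one hidden layer.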
    for hl in hidden_layer:
        for lal in layer_list:
            for lrl in lr_list:
                if hl != 0:
                    for fl in func_list:
                        layer = hl
                        size = lal
                        func = fl
                        model = MyModel(layer, size)
                        model = model.to(device)
                        optimizer = torch.optim.Adam(model.parameters(),
                                                     lr=lrl)
                        print("hidden layer", layer, "layer size", size,
                              "learning rate", lrl, "function", func)
                        train(model, optimizer, train_dataloader,
                              validation_dataloader, epochs, device, layer,
                              func, False)
                if hl == 0:
                    layer = hl
                    size = lal
                    func = "relu"  # to prevent possible failuers, I give a function that is never used
                    model = MyModel(layer, size)
                    model = model.to(device)
                    optimizer = torch.optim.Adam(model.parameters(), lr=lrl)
                    print("hidden layer", layer, "layer size", size,
                          "learning rate", lrl)
                    train(model, optimizer, train_dataloader,
                          validation_dataloader, epochs, device, layer, func,
                          False)

    #Selected final model with saving option
    model = MyModel(2, 1024)
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0003)
    print("Last training before test")
    train(model, optimizer, train_dataloader, validation_dataloader, 24,
          device, 2, "relu", True)
Example #2
def show_predictions(model_path, tub_paths, start=0, end=100, index=0):
    images, y, predictions = [], [], []
    img_ok = 0

    model = MyModel(min_throttle=0., max_throttle=1.)
    model.load(model_path)
    pi = PreprocessImage()

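    # Pair each record's ground-truth angle/throttle with the model's prediction.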
    for path in tub_paths:
        files = glob.glob(os.path.join(path, 'record*.json'))
        for filename in files:
            with open(filename, encoding='utf-8') as data_file:
                data = json.loads(data_file.read())
                if os.path.isfile(os.path.join(path, data['cam/image_array'])):
                    img_ok += 1
                    y.append([data['user/angle'], data['user/throttle']])
                    img = Image.open(
                        os.path.join(path, data['cam/image_array']))
                    predictions.append(model.run(pi.run(np.array(img))))
                    img = np.array(img)
                    images.append(img)

    images = np.array(images)
    y = np.array(y)
    predictions = np.array(predictions)

    fig, ax = plt.subplots()
    plt.plot(y[start:end, index])
    plt.plot(predictions[start:end, index])
    plt.show()
Example #3
def train_model(config):

    data_transforms = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(0.5, 0.5)])
    my_dataset = MyDataset("session-2/data/data/data/",
                           "session-2/data/chinese_mnist.csv",
                           transform=data_transforms)
    train_dataset, val_dataset, test_dataset = torch.utils.data.random_split(
        my_dataset, [10000, 2500, 2500])
    train_loader = DataLoader(train_dataset,
                              batch_size=config["batch_size"],
                              shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=config["batch_size"])
    test_loader = DataLoader(test_dataset, batch_size=config["batch_size"])

    my_model = MyModel(config["h1"], config["h2"], config["h3"],
                       config["h4"]).to(device)

    optimizer = optim.Adam(my_model.parameters(), config["lr"])
    for epoch in range(config["epochs"]):
        loss, acc = train_single_epoch(my_model, train_loader, optimizer)
        print(f"Train Epoch {epoch} loss={loss:.2f} acc={acc:.2f}")
        loss, acc = eval_single_epoch(my_model, val_loader)
        print(f"Eval Epoch {epoch} loss={loss:.2f} acc={acc:.2f}")

    loss, acc = eval_single_epoch(my_model, test_loader)
    print(f"Test loss={loss:.2f} acc={acc:.2f}")

    return my_model
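Note that random_split draws from PyTorch's global RNG; for a split that is reproducible independently of other random state, a generator can be passed (a sketch, with an arbitrary example seed):

split_generator = torch.Generator().manual_seed(42)  # 42 is an arbitrary example seed
train_dataset, val_dataset, test_dataset = torch.utils.data.random_split(
    my_dataset, [10000, 2500, 2500], generator=split_generator)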
Example #4
def train_model():
    # Create instance of model
    model = MyModel()
    sgd_optimizer = SGD(learning_rate=0.01, momentum=0.001, nesterov=False)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd_optimizer,
                  metrics=["accuracy"])

    schedule_lr = LearningRateScheduler(lambda x: 1e-3 * 0.9**x)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.2,
                                  patience=5,
                                  min_lr=0.001)

    (x_train, x_test, y_train, y_test) = load_dataset()

    # Call data generator
    datagen = data_generator()
    history = model.fit_generator(datagen.flow(x_train, y_train,
                                               batch_size=60),
                                  epochs=10,
                                  verbose=2,
                                  steps_per_epoch=500,
                                  validation_data=(x_test, y_test),
                                  callbacks=[schedule_lr, reduce_lr])

    os.makedirs("fashionClassifier", exist_ok=True)
    tf.saved_model.save(model, "fashionClassifier")
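Note that LearningRateScheduler re-applies its schedule at the start of every epoch, so it will overwrite any reduction made by ReduceLROnPlateau at the end of the previous epoch; in practice one of the two callbacks is usually dropped.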
Example #5
def test():
    opt.device = 'cuda:0'
    opt.data_root = 'demo/input/'   # The location of your testing data
    opt.mask_root = 'demo/mask/'    # The location of your testing data mask
    testset = MyDataLoader(opt)
    print('Test with %d' % (len(testset)))

    model = MyModel()
    model.initialize(opt)
    model.load_networks('places_irregular')     # For irregular-mask inpainting
    # model.load_networks('celebahq_center')    # For center-mask inpainting, i.e., a 120*120 hole in a 256*256 input

    val_ssim, val_psnr, val_mae, val_losses_G = [], [], [], []
    with torch.no_grad():
        for i, data in enumerate(testset):
            fname = data['fname'][0]
            model.set_input(data)
            I_g, I_o, val_loss_G = model.optimize_parameters(val=True)
            val_s, val_p, val_m = metrics(I_g, I_o)
            val_ssim.append(val_s)
            val_psnr.append(val_p)
            val_mae.append(val_m)
            val_losses_G.append(val_loss_G.detach().item())
            cv2.imwrite('demo/output/' + fname[:-4] + '.png', postprocess(I_o).numpy()[0])
            print('Val (%d/%d) G:%5.4f, S:%4.4f, P:%4.2f, M:%4.4f' % (
                i + 1, len(testset), np.mean(val_losses_G), np.mean(val_ssim), np.mean(val_psnr), np.mean(val_mae)), end='\r')
        print('Val G:%5.4f, S:%4.4f, P:%4.2f, M:%4.4f' %
              (np.mean(val_losses_G), np.mean(val_ssim), np.mean(val_psnr), np.mean(val_mae)))
Example #6
def main(args):

    print('----------------------------------------------------')
    print("{}-way-{}-shot Few-Shot Relation Classification".format(
        args.N, args.K))
    print("Model: {}".format(args.Model))
    print("config:", args)
    print('----------------------------------------------------')
    start_time = time.time()

    mymodel = MyModel(args)
    mymodel_clone = MyModel_Clone(args)
    best_acc = 0.0
    best_loss = 0.0
    best_model_file = None  # avoids a NameError when no checkpoint matches
    for file_name in os.listdir('model_checkpoint'):
        if 'isNPM.tar' in file_name:
            model_file = 'model_checkpoint/' + file_name
            mymodel.load_state_dict(torch.load(model_file))
            acc, loss = test_model(mymodel, mymodel_clone, args)
            print('model_name:', model_file)
            print('[TEST] | loss: {0:2.6f}, accuracy: {1:2.2f}%'.format(
                loss, acc * 100))
            if acc > best_acc:
                best_acc = acc
                best_loss = loss
                best_model_file = model_file
    print('best_model_name:', best_model_file)
    print('best_loss:', best_loss)
    print('best_acc:', best_acc)
Example #7
def main():
    device = torch.device('cpu')
    torch.manual_seed(1234)
    layer = 2
    size = 1024
    func = "relu"
    model = MyModel(layer, size)
    model.load_state_dict(torch.load('model_state_dict_final'))
    model.eval()
    transforms = T.Compose([T.ToTensor(), T.Normalize((0.5, ), (0.5, ))])
    test_dataset = MnistDatasetTest('data', 'test', transforms)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=64,
                                 shuffle=False,
                                 num_workers=4)
    result = open("result.txt", "w")  # create the results file
    with torch.no_grad():
        for images, image_name in test_dataloader:
            images = images.to(device)
            prediction = model(images, layer, func)
            for i in range(images.size()[0]):
                x = image_name[i] + ' ' + str(int(torch.argmax(prediction[i])))
                result.write(x)
                result.write("\n")
    result.close()
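The same file handling reads more idiomatically with a context manager, which closes the file even if an exception is raised mid-loop; a minimal sketch with hypothetical rows:

with open("result.txt", "w") as result:
    for name, label in [("img_0.png", 3), ("img_1.png", 7)]:  # hypothetical rows
        result.write(name + ' ' + str(label) + "\n")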
Example #8
def main(args):

    mymodel = MyModel(args)
    cuda = torch.cuda.is_available()
    # if cuda is True:
    #     mymodel = mymodel.cuda()
    dist_list = pre_calculate(mymodel, args)
    np.save("preprocess_file/support_examples_weight_IPN.npy", dist_list)
Example #9
File: main.py Project: nbassler/tree
def main(args):
    app = QtWidgets.QApplication(sys.argv)

    v = MyView()
    v.show()
    m = MyModel()
    TreeCtrl(m, v)

    app.exec_()
Example #10
def train_model(config):
    
    my_dataset = MyDataset(...)
    my_model = MyModel(...).to(device)
    for epoch in range(config["epochs"]):
        train_single_epoch(...)
        eval_single_epoch(...)

    return my_model
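A hypothetical concretization of the skeleton above; the MyDataset arguments, device, and helper signatures are assumptions, not the original code:

def train_model(config):
    my_dataset = MyDataset("data/")                 # hypothetical path/arguments
    loader = DataLoader(my_dataset, batch_size=config["batch_size"], shuffle=True)
    my_model = MyModel().to(device)
    optimizer = torch.optim.Adam(my_model.parameters(), lr=config["lr"])
    for epoch in range(config["epochs"]):
        train_single_epoch(my_model, loader, optimizer)  # assumed signature
        eval_single_epoch(my_model, loader)              # assumed signature
    return my_model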
Example #11
def train(train_data, dev_data, my_vocab, train_target, dev_target):
    embed_model = MyModel(my_vocab)
    if classifier_embed_model_path is not None:
        embed_model = torch.load(classifier_embed_model_path)
    model = ClassificationModel(embed_model, hidden_dim * 2, num_classes)
    model = model.to(device)
    criteria = torch.nn.CrossEntropyLoss()
    model_optim = optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=learning_rate)
    best_acc = -1
    writer = SummaryWriter(exp_name)
    all_paragraphs = [
        build_paragraph(this_sample, my_vocab) for this_sample in train_data
    ]
    all_paragraph_lengths = [len(this_sample) for this_sample in train_data]
    train_idx = list(range(len(train_data)))
    for epoch_i in range(num_epoch):
        random.shuffle(train_idx)
        total_loss = 0
        total_batch = 0
        all_paragraphs = [all_paragraphs[i] for i in train_idx]
        all_paragraph_lengths = [all_paragraph_lengths[i] for i in train_idx]
        train_target = [train_target[i] for i in train_idx]
        num_batches = (len(train_data) - 1) // batch_size + 1
        for current_batch in range(num_batches):
            if current_batch % 100 == 0:
                print(current_batch)
            model_optim.zero_grad()
            start = current_batch * batch_size
            end = start + batch_size
            paragraphs = all_paragraphs[start:end]
            paragraph_lengths = all_paragraph_lengths[start:end]
            scores = model(paragraphs)
            targets = train_target[start:end]
            labels = torch.tensor(targets).to(device)
            loss = criteria(scores, labels)
            total_loss += loss.item()
            total_batch += 1
            loss.backward()
            model_optim.step()
        acc = evaluate_classifier(model, dev_data, dev_target, my_vocab)
        if acc > best_acc:
            torch.save(model, classifier_model_path)
            best_acc = acc
        writer.add_scalar('accuracy', acc, epoch_i)
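torch.save(model, ...) as used above pickles the whole module, which ties the checkpoint to the exact class definition; saving only the parameters is the more portable PyTorch idiom:

torch.save(model.state_dict(), classifier_model_path)
# restore later with:
# model.load_state_dict(torch.load(classifier_model_path))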
Example #12
def train(input_dataframe):
    # Split the dataset into train and test folds.
    # With fold_num = 5, each fold trains on ~80% of the data and tests on the remaining ~20%.
    fold_num = 5
    kf = KFold(n_splits=fold_num)

    # Store the best model for prediction
    best_score = 0
    best_model = None
    history_score = []

    # This is used for showing the current fold number
    cnt = 1

    # Training part
    for train_idx, test_idx in kf.split(input_dataframe):
        # Select the train/test rows from the input dataframe by index.
        train = input_dataframe.iloc[train_idx[:data_used_for_training]]
        test = input_dataframe.iloc[test_idx[:data_used_for_testing]]

        # Init the model class
        model = MyModel()

        # Prepare the data for training the model
        X = train.loc[:, 'overview']
        y = train.loc[:, 'genres']

        # Train the model
        model.fit(X, y)

        # Prepare the ground truth and prediction for evaluating the performance.
        truth = test.loc[:, 'genres']
        prediction = model.predict(test.loc[:, 'overview'])

        # Compute the score
        score = evaluation(truth, prediction)

        # Store all the score in this list
        history_score.append(score)

        # Store the best model and score
        if score > best_score:
            best_score = score
            best_model = model

        # Print the current fold's score
        print('Accuracy of fold %d: %.2f' % (cnt, score))
        cnt += 1

    # Print the contents
    print('Best score: ' + str(best_score))
    print('Worst score: ' + str(min(history_score)))
    print('Average score: ' + str(np.array(history_score).mean()))

    # Save the best model
    best_model.save_weights(model_name)
Example #13
def create_confusion_matrix():
    ''' creates a confusion matrix '''
    X_text, X_num, y = main()
    X_text_train, X_text_test, X_num_train, X_num_test, y_train, y_test = train_test_split(
        X_text, X_num, y)
    model = MyModel()
    model.fit(X_text_train, X_num_train, y_train)
    predictions = model.predict(X_text_test, X_num_test)
    return confusion_matrix(y_test, predictions)
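train_test_split splits all the arrays it is given with the same indices, keeping X_text, X_num, and y aligned. For a reproducible, class-balanced split, random_state and stratify can be added (a sketch; the call above uses the default 25% test fraction):

X_text_train, X_text_test, X_num_train, X_num_test, y_train, y_test = train_test_split(
    X_text, X_num, y, test_size=0.25, random_state=0, stratify=y)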
Example #14
def make_video(tub_path,
               video_filename='video.avi',
               model_path=None,
               preprocess_angle=None,
               index=None,
               min_throttle=0.,
               max_throttle=1.):
    files = glob.glob(os.path.join(tub_path, 'record*.json'))
    files = sorted(
        files, key=lambda x: int(re.findall(r'\d+', os.path.basename(x))[0]))

    if model_path is not None:
        model = MyModel(min_throttle=min_throttle, max_throttle=max_throttle)
        model.load(model_path)

    pi = PreprocessImage()
    video = None
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    for filename in files:
        with open(filename, encoding='utf-8') as data_file:
            data = json.loads(data_file.read())
            if os.path.isfile(os.path.join(tub_path, data['cam/image_array'])):
                frame = cv2.imread(
                    os.path.join(tub_path, data['cam/image_array']))
                throttle = data['user/throttle']
                angle = data['user/angle']
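                # map angle in [-1, 1] to an x pixel and throttle in [0, 1] to a y pixel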
                xa = int(frame.shape[1] * ((angle + 1) / 2.))
                ya = int(frame.shape[0] * .95)
                xt = int(frame.shape[1] * .95)
                yt = int(frame.shape[0] - frame.shape[0] * throttle)
                if index is None or index == 0:
                    cv2.circle(frame, (xa, ya), 2, (255, 128, 0), -1)
                if index is None or index == 1:
                    cv2.circle(frame, (xt, yt), 2, (255, 128, 0), -1)
                if model_path is not None:
                    img = Image.open(
                        os.path.join(tub_path, data['cam/image_array']))
                    p_angle, p_throttle = model.run(pi.run(np.array(img)))
                    if preprocess_angle is not None:
                        p_angle = preprocess_angle(p_angle)
                    xa = int(frame.shape[1] * ((p_angle + 1) / 2.))
                    ya = int(frame.shape[0] * .9)
                    xt = int(frame.shape[1] * .9)
                    yt = int(frame.shape[0] - frame.shape[0] * p_throttle)
                    if index is None or index == 0:
                        cv2.circle(frame, (xa, ya), 2, (0, 128, 255), -1)
                    if index is None or index == 1:
                        cv2.circle(frame, (xt, yt), 2, (0, 128, 255), -1)
                if video is None:
                    h, w, ch = frame.shape
                    video = cv2.VideoWriter(video_filename, fourcc, 20.,
                                            (w, h))
                video.write(frame)
    cv2.destroyAllWindows()
    if video is not None:  # guard against tubs with no readable frames
        video.release()
Example #15
def init_model(features):
    model = MyModel(FLAGS.net)
    logits, end_points = model(features, tf.constant(False, tf.bool))

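    # tf.nn.top_k returns (values, indices); [1] keeps the indices of the 3 best classes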
    predictions = {
        'classes': tf.argmax(logits, axis=1),
        'top_3': tf.nn.top_k(logits, k=3)[1],
        'probs': tf.nn.softmax(logits)
    }

    return predictions
Example #16
def train(tub_names, model_path, batch_size, epochs):
    model_path = os.path.expanduser(model_path)
    m = MyModel()
    model = m.model
    model.summary()
    X, y = tubs_to_arrays(tub_names, seed=10)

    total_records = len(X)
    total_train = int(total_records * .8)
    total_val = total_records - total_train
    steps_per_epoch = ((total_train // batch_size) + 1) * 2
    validation_steps = (total_val // batch_size) + 1

    print('Train images: %d, Validation images: %d' % (total_train, total_val))
    print('Batch size:', batch_size)
    print('Epochs:', epochs)
    print('Training steps:', steps_per_epoch)
    print('Validation steps:', validation_steps)

    input("Press Enter to continue...")

    train_gen = generator(X[:total_train],
                          y[:total_train],
                          batch_size,
                          train=True,
                          categorical_angle=m.categorical_angle,
                          categorical_throttle=m.categorical_throttle)
    val_gen = generator(X[total_train:],
                        y[total_train:],
                        batch_size,
                        train=False,
                        categorical_angle=m.categorical_angle,
                        categorical_throttle=m.categorical_throttle)

    save_best = ModelCheckpoint(model_path,
                                monitor='val_loss',
                                verbose=0,
                                save_best_only=True,
                                mode='min')

    callbacks = [save_best, CSVLogger("logs/train.log"), OutputCallback()]

    hist = model.fit_generator(train_gen,
                               steps_per_epoch=steps_per_epoch,
                               epochs=epochs,
                               verbose=0,
                               validation_data=val_gen,
                               callbacks=callbacks,
                               validation_steps=validation_steps,
                               workers=4,
                               use_multiprocessing=True)
    return hist
Example #17
File: server.py Project: qucheng/ELF-1
    def initialize(self):
        opt = elf.Options()
        net_opt = elf.NetOptions()

        opt.loadFromArgs("", self.option_map.getOptionSpec())
        net_opt.loadFromArgs("", self.option_map.getOptionSpec())

        self.rs = elf.RemoteServers(elf.getNetOptions(opt, net_opt), ["actor", "train"])
        GC = elf.BatchReceiver(opt, self.rs)
        GC.setMode(elf.RECV_ENTRY)
        batchsize = opt.batchsize

        print("Batchsize: %d" % batchsize)

        width = 210 // 2
        height = 160 // 2
        T = 6
        num_action = 4

        spec = {}
        spec["actor"] = dict(
            input=dict(s=("float", (3, height, width))),
            reply=dict(a=("int32_t", 1), pi=("float", num_action), V=("float", 1))
        )
        '''
        spec["train"] = dict(
            input=dict(s_=(T, 3, height, width), r_=(T, 1), a_=(T, 1), pi_=(T, num_action), V_=(T, 1)),
        )
        '''

        e = GC.getExtractor()
        desc = allocExtractor(e, batchsize, spec)

        params = {
            "input_dim": width * height * 3,
            "num_action": num_action
        }

        print("Init GC Wrapper")
        has_gpu = self.options.gpu is not None and self.options.gpu >= 0

        self.wrapper = GCWrapper(
            GC, None, batchsize, desc, num_recv=1, default_gpu=(self.options.gpu if has_gpu else None),
            use_numpy=False, params=params)

        # wrapper.reg_callback("train", self.on_train)
        self.wrapper.reg_callback("actor", self.on_actor)
        self.model = MyModel(params)
        if has_gpu:
            self.model.cuda(self.options.gpu)
        # self.optim = torch.optim.Adam(self.model.parameters())
        self.n = 0
Example #18
    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.master = master
        self.query_img = None
        self.file_path = const.model_path
        self.canvas = Canvas(root, width=1280, height=720)
        self.canvas.pack(expand=YES, fill=BOTH)
        self.init_window()
        self.model = MyModel(self.file_path)
        self.kept_images = []
        self.images_container = []
        self._create_labels()
        self.binary_signatures = IntVar()
        self.nn_arhitectures = {'cifar', 'mnist', 'fmnist'}
Example #19
def train():
    opt.device = 'cuda:0'

    opt.data_root = 'demo/input/'   # The location of your training data
    opt.mask_root = 'demo/mask/'    # The location of your training data mask
    train_set = MyDataLoader(opt)

    opt.data_root = 'demo/input/'   # The location of your validation data
    opt.mask_root = 'demo/mask/'    # The location of your validation data mask
    val_set = MyDataLoader(opt)

    model = MyModel()
    model.initialize(opt)

    print('Train/Val with %d/%d' % (len(train_set), len(val_set)))
    for epoch in range(1, 1000):
        print('Epoch: %d' % epoch)
        epoch_iter = 0
        losses_G, ssim, psnr, mae = [], [], [], []
        for i, data in enumerate(train_set):
            epoch_iter += opt.batchSize
            model.set_input(data)
            I_g, I_o, loss_G = model.optimize_parameters()
            s, p, m = metrics(I_g, I_o)
            ssim.append(s)
            psnr.append(p)
            mae.append(m)
            losses_G.append(loss_G.detach().item())
            print('Tra (%d/%d) G:%5.4f, S:%4.4f, P:%4.2f, M:%4.4f' %
                  (epoch_iter, len(train_set), np.mean(losses_G), np.mean(ssim), np.mean(psnr), np.mean(mae)), end='\r')
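            # validate once per epoch, after the final training batch
            # (this check assumes opt.batchSize divides len(train_set) evenly)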
            if epoch_iter == len(train_set):
                val_ssim, val_psnr, val_mae, val_losses_G = [], [], [], []
                with torch.no_grad():
                    for i, data in enumerate(val_set):
                        fname = data['fname'][0]
                        model.set_input(data)
                        I_g, I_o, val_loss_G = model.optimize_parameters(val=True)
                        val_s, val_p, val_m = metrics(I_g, I_o)
                        val_ssim.append(val_s)
                        val_psnr.append(val_p)
                        val_mae.append(val_m)
                        val_losses_G.append(val_loss_G.item())
                        if i+1 <= 200:
                            cv2.imwrite('./demo/output/' + fname[:-4] + '.png', postprocess(I_o).numpy()[0])
                    print('Val (%d/%d) G:%5.4f, S:%4.4f, P:%4.2f, M:%4.4f' %
                          (epoch_iter, len(train_set), np.mean(val_losses_G), np.mean(val_ssim), np.mean(val_psnr), np.mean(val_mae)))
                losses_G, ssim, psnr, mae = [], [], [], []
        model.save_networks('Model_weights')
Example #20
    def _load(self, filePath):
        checkpoint = torch.load(filePath)
        model = MyModel(
            device, checkpoint['inputSize'], checkpoint['gatedCnnOutputSize'],
            checkpoint['gatedCnnStride1'], checkpoint['gatedCnnStride2'],
            checkpoint['gatedCnnKernel1'], checkpoint['gatedCnnKernel2'],
            checkpoint['lstmLayer'], checkpoint['lstmHiddenSize'],
            checkpoint['fcOutputSize'], checkpoint['dropout'])
        model.load_state_dict(checkpoint['stateDict'])
        model.eval()

        if self.device.type == 'cpu':
            model.cpu()
        else:
            model.cuda(device=self.device)
        return model
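When a checkpoint saved on GPU is loaded on a CPU-only machine, torch.load can remap the tensors directly at load time instead of moving the model afterwards; in _load above that would read:

checkpoint = torch.load(filePath, map_location=self.device)  # remap tensors on load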
Example #21
    def __init__(self):
        super(App, self).__init__()
        self.setContentsMargins(10, 0, 0, 10)
        self.setWindowTitle("Visual Model")
        self.setFont(QFont("Open Sans"))
        # The geometry calls below center the window on the screen

        self._popflag = False
        self._popframe = None

        self.move(0, 0)
        self.setGeometry(0, 0, 800, 500)
        resolution = QDesktopWidget().screenGeometry()
        self.move(int((resolution.width() / 2) - (self.frameSize().width() / 2)),
                  int((resolution.height() / 2) - (self.frameSize().height() / 2)))  # move() requires ints

        #self.setFixedSize(800, 550)

        self.modelbase = ModelBase()
        self.datasetview = DatasetView()
        self.settings = Settings()
        self.extensionview = ExtensionView()
        self.mymodel = MyModel()

        self.tab = TabControl(self)
        self.tab.addTab(self.mymodel, "My Models")
        self.tab.addTab(self.modelbase, "Model")
        self.tab.addTab(self.datasetview, "Dataset")
        self.tab.addTab(self.extensionview, "Extension")
        self.tab.addTab(self.settings, "Settings")

        self.tab.setCurrentIndex(1)

        self.setCentralWidget(self.tab)

        try:
            r = requests.get(
                "https://raw.githubusercontent.com/zenqii/visualmodel/main/version.json"
            ).json()
            update = r["update"]

        except Exception:
            update = False

        if update:
            self._onpopup()
Example #22
def main(args):

    print('----------------------------------------------------')
    print("{}-way-{}-shot Few-Shot Relation Classification".format(
        args.N, args.K))
    print("Model: {}".format(args.Model))
    print("config:", args)
    print('----------------------------------------------------')

    start_time = time.time()
    setup_seed(args.seed)
    mymodel = MyModel(args)
    mymodel_clone = MyModel_Clone(args)
    sample_class_weights = None
    if args.ITT:
        sample_class_weights = pre_calculate(mymodel, args)
    train_model(mymodel, mymodel_clone, args, sample_class_weights)
Example #23
def predict(title, description):
    # Load model and weights
    model = MyModel()
    model.load_weights(model_name)

    # Clean the description
    des = clean_data_for_overview(description)

    # Predict
    genre = model.predict(np.array([des]))

    # Prepare the output
    res = r'''{
        "title": "%s",
        "description": "%s",
        "genre": "%s"
}''' % (title, description, genre[0])
    print(res)
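Building the output with json.dumps avoids producing broken JSON when the title or description contains quotes; a sketch of the equivalent call:

import json

res = json.dumps(
    {"title": title, "description": description, "genre": genre[0]},
    indent=4)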
Example #24
def train_model(config):

    data_transforms = transforms.Compose([...])
    train_dataset = ImageFolder(...)
    train_loader = DataLoader(train_dataset, batch_size=config["batch_size"], shuffle=True)
    test_dataset = ImageFolder(...)
    test_loader = DataLoader(test_dataset, batch_size=config["batch_size"])

    my_model = MyModel().to(device)

    optimizer = optim.Adam(my_model.parameters(), config["lr"])
    for epoch in range(config["epochs"]):
        loss, acc = train_single_epoch(my_model, train_loader, optimizer)
        print(f"Train Epoch {epoch} loss={loss:.2f} acc={acc:.2f}")
        loss, acc = eval_single_epoch(my_model, test_loader)
        print(f"Eval Epoch {epoch} loss={loss:.2f} acc={acc:.2f}")
    
    return my_model
Example #25
def train_model(config, train_dataset, val_dataset):

    my_model = MyModel(config).to(device)

    train_dataloader = torch.utils.data.DataLoader(
        train_dataset, batch_size=config["batchsize"], shuffle=True)
    val_dataloader = torch.utils.data.DataLoader(
        val_dataset, batch_size=config["batchsize"], shuffle=False)

    # Create the optimizer and loss once; re-creating Adam every epoch would
    # discard its running moment estimates.
    optimizer = torch.optim.Adam(my_model.parameters(), config["lrate"])
    criterion = torch.nn.CrossEntropyLoss()
    for epoch in range(int(config["epochs"])):
        train_single_epoch(my_model, optimizer, criterion, train_dataloader)
        print(eval_single_epoch(my_model, criterion, val_dataloader))

    return my_model
Example #26
def build_and_train_conv1_model(inputs, labels, num_of_labels, do_train=True, validation_data=None):
    inputs_transpose, labels_many_to_one, input_len, num_of_features = \
        Utils.convert_to_cnn_inputs_and_labels(inputs, labels)
    model = MyModel(input_len=input_len, num_features=num_of_features, num_labels=num_of_labels)
    weights_path = None
    if model_config.continue_train_existing_model:
        weights_path = model_config.model_file_name
    model.build_model("CONV1", [], [], weights_path=weights_path)
    if do_train:
        if validation_data is not None:
            val_inputs_transpose, val_labels_many_to_one, input_len, num_of_features = \
                Utils.convert_to_cnn_inputs_and_labels(validation_data[0], validation_data[1])
            model.fit(inputs_transpose, labels_many_to_one,
                      model_path=model_config.model_file_name,
                      val_percentage=validation_perc,
                      early_stopping_patience=10,
                      validation_data=(val_inputs_transpose, val_labels_many_to_one),
                      batch_size=batch_size, num_epochs=num_epochs)
        else:
            model.fit(inputs_transpose, labels_many_to_one,
                      model_path=model_config.model_file_name,
                      early_stopping_patience=10,
                      val_percentage=validation_perc,
                      batch_size=batch_size, num_epochs=num_epochs)
    return model
Example #27
def build_and_train_gru_model(inputs, labels, num_of_labels, do_train=True):
    num_of_features = len(inputs[0][0])
    model = MyModel(input_len=model_config.input_vec_size, num_features=num_of_features, num_labels=num_of_labels)
    weights_path = None  # avoids a NameError when not continuing from a checkpoint
    if model_config.continue_train_existing_model:
        weights_path = model_config.model_file_name
    if model_config.many_to_many:
        model.build_model("GRU", [350, 300, 250], [0.05, 0.05, 0.05], weights_path)
        if do_train:
            model.fit(inputs, labels, model_path="model", early_stopping_patience=40, val_percentage=validation_perc,
                      batch_size=batch_size, num_epochs=num_epochs)

    else:
        # NOTE: train - many inputs to one label
        labels_many_to_one = Utils.get_many_to_one_labels(labels, num_of_labels)
        model.build_model("GRU_1", [350, 300, 250], [0.05, 0.05, 0.05], weights_path)
        if do_train:
            model.fit(inputs, labels_many_to_one, model_path="model", early_stopping_patience=40, val_percentage=validation_perc,
                  batch_size=batch_size, num_epochs=num_epochs)
    return model
Example #28
class PoleWidth(Scene):

    model = MyModel()
    iterations = 16
    mode = '2D'

    def run(self, x_iteration, y_iteration):
        self.model.start()
        pole_width = self.vary(2, 18, x_iteration)
        pole_angle = self.vary(20, 30, y_iteration)  # presumably varies along y; y_iteration was otherwise unused
        self.model.pre(pole_width=pole_width,
                       pole_angle=pole_angle,
                       save_as=f'pole_width_{pole_width}.fem')
        self.model.solve()
        res = self.model.post()
        self.model.close()
        return res

    def display_results(self, results):
        plt.plot(self.get_axis(10, 18), results[0])
        plt.show()
Example #29
def run():
    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')

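    # the XOR truth table, used as a tiny toy dataset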
    dataset = [
        {"in" : [0.0, 0.0], "out" : [0.0]},
        {"in" : [0.0, 1.0], "out" : [1.0]},
        {"in" : [1.0, 0.0], "out" : [1.0]},
        {"in" : [1.0, 1.0], "out" : [0.0]}]

    data_loader = get_data_loader(dataset=dataset, shuffle=True)

    model = MyModel()
    model.to(device)

    optimizer = optim.Adam(model.parameters())
    loss_func = nn.MSELoss()

    model.train()

    for _ in range(0, 2000):
        for target, features in data_loader:  # renamed to avoid shadowing the input() builtin
            features = features.to(device)
            target = target.to(device)

            result = model(features)
            loss = loss_func(result, target)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    test_data_loader = get_data_loader(dataset=dataset, shuffle=False)
    model.eval()
    with torch.no_grad():
        for _, features in test_data_loader:
            print(features)
            features = features.to(device)
            result = model(features)
            print(result)
Example #30
def init_model(features, labels):
    model = MyModel(FLAGS.net)
    logits, end_points = model(features, tf.constant(False, dtype=tf.bool))

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits)
    loss = tf.reduce_mean(cross_entropy)

    predictions = {
        'classes': tf.argmax(logits, axis=1),
        'top_3': tf.nn.top_k(logits, k=3)[1]
    }

    top_1_acc, update_top_1 = tf.metrics.accuracy(labels,
                                                  predictions['classes'],
                                                  name='metrics')
    top_3_acc, update_top_3 = tf.metrics.mean(tf.nn.in_top_k(
        predictions=logits, targets=labels, k=3),
                                              name='metrics')

    running_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES,
                                     scope="metrics")
    metrics_init = tf.variables_initializer(var_list=running_vars)
    metrics_update = tf.group([update_top_1, update_top_3])

    top_1_error = 1.0 - top_1_acc
    top_3_error = 1.0 - top_3_acc

    metrics = {
        'init': metrics_init,
        'update': metrics_update,
        'top_1_error': top_1_error,
        'top_3_error': top_3_error
    }

    tf.summary.scalar('metrics/top_1_error', top_1_error)
    tf.summary.scalar('metrics/top_3_error', top_3_error)

    return loss, predictions, metrics