Example 1
def show_predictions(model_path, tub_paths, start=0, end=100, index=0):
    images, y, predictions = [], [], []
    img_ok = 0

    model = MyModel(min_throttle=0., max_throttle=1.)
    model.load(model_path)
    pi = PreprocessImage()

    for path in tub_paths:
        files = glob.glob(os.path.join(path, 'record*.json'))
        for filename in files:
            with open(filename, encoding='utf-8') as data_file:
                data = json.loads(data_file.read())
                if os.path.isfile(os.path.join(path, data['cam/image_array'])):
                    img_ok += 1
                    y.append([data['user/angle'], data['user/throttle']])
                    img = np.array(
                        Image.open(
                            os.path.join(path, data['cam/image_array'])))
                    predictions.append(model.run(pi.run(img)))
                    images.append(img)

    images = np.array(images)
    y = np.array(y)
    predictions = np.array(predictions)

    fig, ax = plt.subplots()
    ax.plot(y[start:end, index], label='recorded')
    ax.plot(predictions[start:end, index], label='predicted')
    ax.legend()
    plt.show()
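A typical call, with hypothetical model and tub paths (per the layout of y above, index 0 plots the angle channel and index 1 the throttle):

show_predictions('models/pilot.h5', ['data/tub_1', 'data/tub_2'],
                 start=0, end=200, index=0)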
Example 2
def main(args):

    print('----------------------------------------------------')
    print("{}-way-{}-shot Few-Shot Relation Classification".format(
        args.N, args.K))
    print("Model: {}".format(args.Model))
    print("config:", args)
    print('----------------------------------------------------')
    start_time = time.time()

    mymodel = MyModel(args)
    mymodel_clone = MyModel_Clone(args)
    best_acc = 0.0
    best_loss = 0.0
    best_model_file = None  # stays None if no '*isNPM.tar' checkpoint is found
    for file_name in os.listdir('model_checkpoint'):
        if 'isNPM.tar' in file_name:
            model_file = 'model_checkpoint/' + file_name
            mymodel.load_state_dict(torch.load(model_file))
            acc, loss = test_model(mymodel, mymodel_clone, args)
            print('model_name:', model_file)
            print('[TEST] | loss: {0:2.6f}, accuracy: {1:2.2f}%'.format(
                loss, acc * 100))
            if acc > best_acc:
                best_acc = acc
                best_loss = loss
                best_model_file = model_file
    print('best_model_name:', best_model_file)
    print('best_loss:', best_loss)
    print('best_acc:', best_acc)
Example 3
def train_model():
    # Create instance of model
    model = MyModel()
    SGD_OPTIMIZER = SGD(learning_rate=0.01, momentum=0.001, nesterov=False)
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD_OPTIMIZER,
                  metrics=["accuracy"])

    # Note: the scheduler overrides the optimizer's lr at each epoch, and
    # combining it with ReduceLROnPlateau means both callbacks set the lr.
    schedule_lr = LearningRateScheduler(lambda x: 1e-3 * 0.9**x)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.2,
                                  patience=5,
                                  min_lr=0.001)

    (x_train, x_test, y_train, y_test) = load_dataset()

    # Call data generator
    datagen = data_generator()
    history = model.fit_generator(datagen.flow(x_train, y_train,
                                               batch_size=60),
                                  epochs=10,
                                  verbose=2,
                                  steps_per_epoch=500,
                                  validation_data=(x_test, y_test),
                                  callbacks=[schedule_lr, reduce_lr])

    if not os.path.exists("fashionClassifier"):
        os.makedirs("fashionClassifier")
    tf.saved_model.save(model, "fashionClassifier")
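Note that fit_generator is deprecated in TensorFlow 2; on TF >= 2.1 the equivalent call goes through Model.fit, which accepts generators directly. A sketch using the same objects as above:

history = model.fit(datagen.flow(x_train, y_train, batch_size=60),
                    epochs=10,
                    verbose=2,
                    steps_per_epoch=500,
                    validation_data=(x_test, y_test),
                    callbacks=[schedule_lr, reduce_lr])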
Example 4
def train_model(config):

    data_transforms = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(0.5, 0.5)])
    my_dataset = MyDataset("session-2/data/data/data/",
                           "session-2/data/chinese_mnist.csv",
                           transform=data_transforms)
    train_dataset, val_dataset, test_dataset = torch.utils.data.random_split(
        my_dataset, [10000, 2500, 2500])
    train_loader = DataLoader(train_dataset,
                              batch_size=config["batch_size"],
                              shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=config["batch_size"])
    test_loader = DataLoader(test_dataset, batch_size=config["batch_size"])

    my_model = MyModel(config["h1"], config["h2"], config["h3"],
                       config["h4"]).to(device)

    optimizer = optim.Adam(my_model.parameters(), config["lr"])
    for epoch in range(config["epochs"]):
        loss, acc = train_single_epoch(my_model, train_loader, optimizer)
        print(f"Train Epoch {epoch} loss={loss:.2f} acc={acc:.2f}")
        loss, acc = eval_single_epoch(my_model, val_loader)
        print(f"Eval Epoch {epoch} loss={loss:.2f} acc={acc:.2f}")

    loss, acc = eval_single_epoch(my_model, test_loader)
    print(f"Test loss={loss:.2f} acc={acc:.2f}")

    return my_model
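This example assumes train_single_epoch and eval_single_epoch helpers that return a (loss, acc) pair; their implementations are not shown. A minimal sketch of the training helper under that assumption:

def train_single_epoch(model, loader, optimizer):
    # One pass over the loader; returns mean loss and accuracy.
    criterion = torch.nn.CrossEntropyLoss()
    model.train()
    total_loss, correct, seen = 0.0, 0, 0
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        logits = model(x)
        loss = criterion(logits, y)
        loss.backward()
        optimizer.step()
        total_loss += loss.item() * x.size(0)
        correct += (logits.argmax(dim=1) == y).sum().item()
        seen += x.size(0)
    return total_loss / seen, correct / seen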
Example 5
def create_confusion_matrix():
    '''Create a confusion matrix from predictions on a held-out test split.'''
    X_text, X_num, y = main()
    X_text_train, X_text_test, X_num_train, X_num_test, y_train, y_test = train_test_split(
        X_text, X_num, y)
    model = MyModel()
    model.fit(X_text_train, X_num_train, y_train)
    predictions = model.predict(X_text_test, X_num_test)
    return confusion_matrix(y_test, predictions)
Example 6
def train(input_dataframe):
    # Split the dataset into train and test folds.
    # With fold_num = 5, each training fold covers about 80% of the data and
    # each test fold about 20%.
    fold_num = 5
    kf = KFold(n_splits=fold_num)

    # Store the best model for prediction
    best_score = 0
    best_model = None
    history_score = []

    # Tracks the current fold number for logging
    cnt = 1

    # Training part
    for train_idx, test_idx in kf.split(input_dataframe):
        # Slice the training and testing rows from the input dataframe by
        # index, capped at data_used_for_training / data_used_for_testing rows.
        train = input_dataframe.iloc[train_idx[:data_used_for_training]]
        test = input_dataframe.iloc[test_idx[:data_used_for_testing]]

        # Init the model class
        model = MyModel()

        # Prepare the data for training the model
        X = train.loc[:, 'overview']
        y = train.loc[:, 'genres']

        # Train the model
        model.fit(X, y)

        # Prepare the ground truth and prediction for evaluating the performance.
        truth = test.loc[:, 'genres']
        prediction = model.predict(test.loc[:, 'overview'])

        # Compute the score
        score = evaluation(truth, prediction)

        # Store all the score in this list
        history_score.append(score)

        # Store the best model and score
        if score > best_score:
            best_score = score
            best_model = model

        # Print the current fold's score
        print('Accuracy of fold %d: %.2f' % (cnt, score))
        cnt += 1

    # Print summary statistics
    print('Best score: ' + str(best_score))
    print('Worst score: ' + str(min(history_score)))
    print('Average score: ' + str(np.array(history_score).mean()))

    # Save the best model
    best_model.save_weights(model_name)
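The 80/20 split in the comment above follows directly from n_splits=5; a quick sanity check:

import numpy as np
from sklearn.model_selection import KFold

kf = KFold(n_splits=5)
for train_idx, test_idx in kf.split(np.arange(100)):
    print(len(train_idx), len(test_idx))  # prints "80 20" five times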
Example 7
def test():
    opt.device = 'cuda:0'
    opt.data_root = 'demo/input/'   # The location of your testing data
    opt.mask_root = 'demo/mask/'    # The location of your testing data mask
    testset = MyDataLoader(opt)
    print('Testing on %d samples' % len(testset))

    model = MyModel()
    model.initialize(opt)
    model.load_networks('places_irregular')     # for irregular-mask inpainting
    # model.load_networks('celebahq_center')    # for center-mask inpainting, i.e., a 120*120 hole in a 256*256 input

    val_ssim, val_psnr, val_mae, val_losses_G = [], [], [], []
    with torch.no_grad():
        for i, data in enumerate(testset):
            fname = data['fname'][0]
            model.set_input(data)
            I_g, I_o, val_loss_G = model.optimize_parameters(val=True)
            val_s, val_p, val_m = metrics(I_g, I_o)
            val_ssim.append(val_s)
            val_psnr.append(val_p)
            val_mae.append(val_m)
            val_losses_G.append(val_loss_G.detach().item())
            cv2.imwrite('demo/output/' + fname[:-4] + '.png', postprocess(I_o).numpy()[0])
            print('Val (%d/%d) G:%5.4f, S:%4.4f, P:%4.2f, M:%4.4f' % (
                i + 1, len(testset), np.mean(val_losses_G), np.mean(val_ssim), np.mean(val_psnr), np.mean(val_mae)), end='\r')
        print('Val G:%5.4f, S:%4.4f, P:%4.2f, M:%4.4f' %
              (np.mean(val_losses_G), np.mean(val_ssim), np.mean(val_psnr), np.mean(val_mae)))
Example 8
def make_video(tub_path,
               video_filename='video.avi',
               model_path=None,
               preprocess_angle=None,
               index=None,
               min_throttle=0.,
               max_throttle=1.):
    files = glob.glob(os.path.join(tub_path, 'record*.json'))
    files = sorted(
        files, key=lambda x: int(re.findall(r'\d+', os.path.basename(x))[0]))

    if model_path is not None:
        model = MyModel(min_throttle=min_throttle, max_throttle=max_throttle)
        model.load(model_path)

    pi = PreprocessImage()
    video = None
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    for filename in files:
        with open(filename, encoding='utf-8') as data_file:
            data = json.loads(data_file.read())
            if os.path.isfile(os.path.join(tub_path, data['cam/image_array'])):
                frame = cv2.imread(
                    os.path.join(tub_path, data['cam/image_array']))
                throttle = data['user/throttle']
                angle = data['user/angle']
                xa = int(frame.shape[1] * ((angle + 1) / 2.))
                ya = int(frame.shape[0] * .95)
                xt = int(frame.shape[1] * .95)
                yt = int(frame.shape[0] - frame.shape[0] * throttle)
                if index is None or index == 0:
                    cv2.circle(frame, (xa, ya), 2, (255, 128, 0), -1)
                if index is None or index == 1:
                    cv2.circle(frame, (xt, yt), 2, (255, 128, 0), -1)
                if model_path is not None:
                    img = Image.open(
                        os.path.join(tub_path, data['cam/image_array']))
                    p_angle, p_throttle = model.run(pi.run(np.array(img)))
                    if preprocess_angle is not None:
                        p_angle = preprocess_angle(p_angle)
                    xa = int(frame.shape[1] * ((p_angle + 1) / 2.))
                    ya = int(frame.shape[0] * .9)
                    xt = int(frame.shape[1] * .9)
                    yt = int(frame.shape[0] - frame.shape[0] * p_throttle)
                    if index is None or index == 0:
                        cv2.circle(frame, (xa, ya), 2, (0, 128, 255), -1)
                    if index is None or index == 1:
                        cv2.circle(frame, (xt, yt), 2, (0, 128, 255), -1)
                if video is None:
                    h, w, ch = frame.shape
                    video = cv2.VideoWriter(video_filename, fourcc, 20.,
                                            (w, h))
                video.write(frame)
    cv2.destroyAllWindows()
    if video is not None:  # guard against the case where no frame was written
        video.release()
Example 9
    def initialize(self):
        opt = elf.Options()
        net_opt = elf.NetOptions()

        opt.loadFromArgs("", self.option_map.getOptionSpec())
        net_opt.loadFromArgs("", self.option_map.getOptionSpec())

        self.rs = elf.RemoteServers(elf.getNetOptions(opt, net_opt), ["actor", "train"])
        GC = elf.BatchReceiver(opt, self.rs)
        GC.setMode(elf.RECV_ENTRY)
        batchsize = opt.batchsize

        print("Batchsize: %d" % batchsize)

        width = 210 // 2
        height = 160 // 2
        T = 6
        num_action = 4

        spec = {}
        spec["actor"] = dict(
            input=dict(s=("float", (3, height, width))),
            reply=dict(a=("int32_t", 1), pi=("float", num_action), V=("float", 1))
        )
        '''
        spec["train"] = dict(
            input=dict(s_=(T, 3, height, width), r_=(T, 1), a_=(T, 1), pi_=(T, num_action), V_=(T, 1)),
        )
        '''

        e = GC.getExtractor()
        desc = allocExtractor(e, batchsize, spec)

        params = {
            "input_dim": width * height * 3,
            "num_action": num_action
        }

        print("Init GC Wrapper")
        has_gpu = self.options.gpu is not None and self.options.gpu >= 0

        self.wrapper = GCWrapper(
            GC, None, batchsize, desc, num_recv=1, default_gpu=(self.options.gpu if has_gpu else None),
            use_numpy=False, params=params)

        # wrapper.reg_callback("train", self.on_train)
        self.wrapper.reg_callback("actor", self.on_actor)
        self.model = MyModel(params)
        if has_gpu:
            self.model.cuda(self.options.gpu)
        # self.optim = torch.optim.Adam(self.model.parameters())
        self.n = 0
Example 10
    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.master = master
        self.query_img = None
        self.file_path = const.model_path
        self.canvas = Canvas(root, width=1280, height=720)  # assumes a module-level Tk `root`
        self.canvas.pack(expand=YES, fill=BOTH)
        self.init_window()
        self.model = MyModel(self.file_path)
        self.kept_images = []
        self.images_container = []
        self._create_labels()
        self.binary_signatures = IntVar()
        self.nn_arhitectures = {'cifar', 'mnist', 'fmnist'}
Example 11
def main():
    args = parse_args()

    # load network and weights
    model = Model.load_from_checkpoint(args.model)
    if not args.no_gpu:
        model.cuda()
    model.eval()

    # Load data; OpenCV reads BGR, but the network expects RGB
    img = cv2.imread(str(args.input))[..., ::-1]

    albumentations_transform = A.Compose([
        A.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        ),
        ToTensorV2(),
    ])

    # Apply the input transform and add a batch dimension -> (1, 3, H, W)
    batch = albumentations_transform(image=img)["image"].unsqueeze(0)
    if not args.no_gpu:
        batch = batch.cuda()

    # Feed data through network
    with torch.no_grad():
        pred = model(batch)

    # TODO: task-specific post-processing; for now just drop the batch dim
    # and convert back to a NumPy array so it can be written to disk.
    pred = pred.squeeze(0).cpu().numpy()

    # Save inference result
    cv2.imwrite(str(args.output), pred)
Example 12
    def _load(self, filePath):
        checkpoint = torch.load(filePath, map_location=self.device)
        model = MyModel(
            self.device, checkpoint['inputSize'], checkpoint['gatedCnnOutputSize'],
            checkpoint['gatedCnnStride1'], checkpoint['gatedCnnStride2'],
            checkpoint['gatedCnnKernel1'], checkpoint['gatedCnnKernel2'],
            checkpoint['lstmLayer'], checkpoint['lstmHiddenSize'],
            checkpoint['fcOutputSize'], checkpoint['dropout'])
        model.load_state_dict(checkpoint['stateDict'])
        model.eval()

        if self.device.type == 'cpu':
            model.cpu()
        else:
            model.cuda(device=self.device)
        return model
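For reference, a hypothetical _save counterpart implied by the checkpoint keys that _load reads (the original saving code is not shown, so this is only a sketch):

    def _save(self, model, filePath, hyperparams):
        # hyperparams holds inputSize, gatedCnnOutputSize, the stride/kernel
        # settings, lstmLayer, lstmHiddenSize, fcOutputSize and dropout,
        # matching the keys read in _load above.
        checkpoint = dict(hyperparams)
        checkpoint['stateDict'] = model.state_dict()
        torch.save(checkpoint, filePath)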
Example 13
def main(args):

    mymodel = MyModel(args)
    cuda = torch.cuda.is_available()
    # if cuda is True:
    #     mymodel = mymodel.cuda()
    dist_list = pre_calculate(mymodel, args)
    np.save("preprocess_file/support_examples_weight_IPN.npy", dist_list)
Example 14
def train_model(config):

    data_transforms = transforms.Compose([...])
    train_dataset = ImageFolder...
    train_loader = DataLoader(train_dataset, batch_size=config["batch_size"], shuffle=True)
    test_dataset = ImageFolder...
    test_loader = DataLoader(test_dataset, batch_size=config["batch_size"])

    my_model = MyModel().to(device)

    optimizer = optim.Adam(my_model.parameters(), config["lr"])
    for epoch in range(config["epochs"]):
        loss, acc = train_single_epoch(my_model, train_loader, optimizer)
        print(f"Train Epoch {epoch} loss={loss:.2f} acc={acc:.2f}")
        loss, acc = eval_single_epoch(my_model, test_loader)
        print(f"Eval Epoch {epoch} loss={loss:.2f} acc={acc:.2f}")
    
    return my_model
Example 15
def train(train_data, dev_data, my_vocab, train_target, dev_target):
    embed_model = MyModel(my_vocab)
    if classifier_embed_model_path is not None:
        embed_model = torch.load(classifier_embed_model_path)
    model = ClassificationModel(embed_model, hidden_dim * 2, num_classes)
    model = model.to(device)
    criteria = torch.nn.CrossEntropyLoss()
    model_optim = optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=learning_rate)
    best_acc = -1
    writer = SummaryWriter(exp_name)
    #print(len(train_data))
    all_paragraphs = [
        build_paragraph(this_sample, my_vocab) for this_sample in train_data
    ]
    all_paragraph_lengths = [len(this_sample) for this_sample in train_data]
    train_idx = list(range(len(train_data)))
    for epoch_i in range(num_epoch):
        random.shuffle(train_idx)
        total_loss = 0
        total_batch = 0
        all_paragraphs = [all_paragraphs[i] for i in train_idx]
        all_paragraph_lengths = [all_paragraph_lengths[i] for i in train_idx]
        train_target = [train_target[i] for i in train_idx]
        for current_batch in range(
                int((len(train_data) - 1) / batch_size) + 1):
            if current_batch % 100 == 0:
                print(current_batch)
            model_optim.zero_grad()
            batch_start = current_batch * batch_size
            batch_end = batch_start + batch_size
            paragraphs = all_paragraphs[batch_start:batch_end]
            paragraph_lengths = all_paragraph_lengths[batch_start:batch_end]
            scores = model(paragraphs)
            targets = train_target[batch_start:batch_end]
            labels = torch.tensor(targets).to(device)
            loss = criteria(scores, labels)
            #print(loss)
            total_loss += loss.item()
            total_batch += 1
            loss.backward()
            model_optim.step()
        acc = evaluate_classifier(model, dev_data, dev_target, my_vocab)
        if acc > best_acc:
            torch.save(model, classifier_model_path)
            best_acc = acc
        writer.add_scalar('accuracy', acc, epoch_i)
Example 16
def predict(title, description):
    # Load model and weights
    model = MyModel()
    model.load_weights(model_name)

    # Clean the description
    des = clean_data_for_overview(description)

    # Predict
    genre = model.predict(np.array([des]))

    # Prepare the output
    res = r'''{
        "title": "%s",
        "description": "%s",
        "genre": "%s"
}''' % (title, description, genre[0])
    print(res)
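The %-formatted template above stops being valid JSON as soon as the title or description contains a quote; a sketch of a safer variant built on json.dumps, using the same assumed helpers:

import json

def predict_json(title, description):
    model = MyModel()
    model.load_weights(model_name)
    des = clean_data_for_overview(description)
    genre = model.predict(np.array([des]))
    # json.dumps escapes quotes and backslashes; str() guards against numpy types
    print(json.dumps({"title": title,
                      "description": description,
                      "genre": str(genre[0])}, indent=4))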
Example 17
def train_model(config):
    
    my_dataset = MyDataset(...)
    my_model = MyModel(...).to(device)
    for epoch in range(config["epochs"]):
        train_single_epoch(...)
        eval_single_epoch(...)

    return my_model
Example 18
def main(args):
    app = QtWidgets.QApplication(sys.argv)

    v = MyView()
    v.show()
    m = MyModel()
    TreeCtrl(m, v)

    app.exec_()
Example 19
def train_model(config, train_dataset, val_dataset):

    my_model = MyModel(config).to(device)

    train_dataloader = torch.utils.data.DataLoader(
        train_dataset, batch_size=config["batchsize"], shuffle=True)
    val_dataloader = torch.utils.data.DataLoader(
        val_dataset, batch_size=config["batchsize"], shuffle=False)

    # Create the optimizer and criterion once, outside the epoch loop, so the
    # optimizer state (e.g. Adam moments) is not reset every epoch.
    optimizer = torch.optim.Adam(my_model.parameters(), config["lrate"])
    criterion = torch.nn.CrossEntropyLoss()

    for epoch in range(int(config["epochs"])):
        train_single_epoch(my_model, optimizer, criterion, train_dataloader)
        print(eval_single_epoch(my_model, criterion, val_dataloader))

    return my_model
Example 20
class Submission:
    """
    API Wrapper class which loads a saved model upon construction, and uses this to implement an API for feature 
    selection and missing value prediction. This API will be used to perform active learning evaluation in private.
    """
    def __init__(self):
        # Load a saved model here.
        self.model = MyModel()
        self.model.load('most_popular.npy', 'num_answers.npy')

    def select_feature(self, masked_data, can_query):
        """
        Use your model to select a new feature to observe from a list of candidate features for each student in the
            input data, with the goal of selecting features with maximise performance on a held-out set of answers for
            each student.
        Args:
            masked_data (np.array): Array of shape (num_students, num_questions) containing data revealed to the model
                at the current step. Unobserved values are denoted by -1.
            can_query (np.array): Binary array of shape (num_students, num_questions), indicating which features can be 
                queried by the model in the current step.
        Returns:
            selections (list): List of ints, length num_students, containing the index of the feature selected to query 
            for each student (row) in the dataset.
        """
        # Use the loaded model to perform feature selection.
        selections = self.model.select_feature(masked_data, can_query)

        return selections

    def predict(self, masked_data):
        """
        Use your model to predict missing values in the input data.
        Args:
            masked_data (np.array): Array of shape (num_students, num_questions) containing data revealed to the model
                at the current step. Unobserved values are denoted by -1.
        Returns:
            predictions (np.array): Array of shape (num_students, num_questions) containing predictions for the
                unobserved values in `masked_data`. The values given to the observed data in this array will be ignored.
        """
        # Use the loaded model to perform missing value prediction.
        predictions = self.model.predict(masked_data)

        return predictions
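A minimal driver loop for this API, with hypothetical sizes and a hypothetical observe() oracle, just to illustrate the calling convention:

import numpy as np

sub = Submission()
masked = -np.ones((num_students, num_questions))  # -1 marks unobserved entries
can_query = np.ones_like(masked)                  # every feature still queryable

for _ in range(num_steps):                        # num_steps is hypothetical
    picks = sub.select_feature(masked, can_query)
    for student, q in enumerate(picks):
        masked[student, q] = observe(student, q)  # observe() is a stand-in oracle
        can_query[student, q] = 0

predictions = sub.predict(masked)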
Example 21
class MyController():
    def __init__(self, parent):
        self.parent = parent
        self.model = MyModel(self)  # initializes the model
        self.view = MyView(self)  #initializes the view
        #initialize objects in view
        # a non-cheat way to do MVC with tkinter control variables
        #self.view.setEntry_text('Data')
        #self.view.setLabel_text('>')

    #event handlers
    def salirButtonPressed(self):
        self.parent.destroy()

    def calcularButtonPressed(self):
        key = self.view.getEntry_text_key()
        msg = self.view.getEntry_text_msg()
        self.model.clearList()
        if len(key):
            self.model.addToList(key[0])
        if len(msg):
            self.model.addToList(msg)
        if (len(key) and len(msg)):
            self.view.setLabel_text(self.getCipherText(self.model.getList()))

    def listChangedDelegate(self):
        # the model changed internally and needs to signal the change
        print(self.model.getList())

    def getCipherText(self, in_list):
        key = in_list[0]
        msg = in_list[1]
        values = []
        print("Key = ", key, ord(key))
        start = time.time()
        vs = []
        for x in msg:
            vs.append(ord(x))
        print(vs)
        vv = []
        low_level_driver.send_key_byte(ord(key))
        for x in msg:
            time.sleep(0.01)
            low_level_driver.send_data_byte(ord(x))
            time.sleep(0.01)
            v = low_level_driver.get_byte()
            val = chr(v)  # was unichr(), which is Python 2 only
            values.append(val)
            vv.append(v)
        print("Transaction finished. Time = ", time.time() - start)
        print(values)
        print(vv)
        return ''.join(values)
Example 22
def build_and_train_gru_model(inputs, labels, num_of_labels, do_train=True):
    num_of_features = len(inputs[0][0])
    model = MyModel(input_len=model_config.input_vec_size, num_features=num_of_features, num_labels=num_of_labels)
    weights_path = None  # stays None unless resuming from an existing model
    if model_config.continue_train_existing_model:
        weights_path = model_config.model_file_name
    if model_config.many_to_many:
        model.build_model("GRU", [350, 300, 250], [0.05, 0.05, 0.05], weights_path)
        if do_train:
            model.fit(inputs, labels, model_path="model", early_stopping_patience=40, val_percentage=validation_perc,
                      batch_size=batch_size, num_epochs=num_epochs)

    else:
        # NOTE: train - many inputs to one label
        labels_many_to_one = Utils.get_many_to_one_labels(labels, num_of_labels)
        model.build_model("GRU_1", [350, 300, 250], [0.05, 0.05, 0.05], weights_path)
        if do_train:
            model.fit(inputs, labels_many_to_one, model_path="model", early_stopping_patience=40,
                      val_percentage=validation_perc, batch_size=batch_size, num_epochs=num_epochs)
    return model
Example 23
def init_model(features):
    model = MyModel(FLAGS.net)
    logits, end_points = model(features, tf.constant(False, tf.bool))

    predictions = {
        'classes': tf.argmax(logits, axis=1),
        'top_3': tf.nn.top_k(logits, k=3)[1],
        'probs': tf.nn.softmax(logits)
    }

    return predictions
Example 24
def main():
    device = torch.device('cpu')
    torch.manual_seed(1234)
    layer = 2
    size = 1024
    func = "relu"
    model = MyModel(layer, size)
    model.load_state_dict(torch.load('model_state_dict_final'))
    model.eval()
    transforms = T.Compose([T.ToTensor(), T.Normalize((0.5, ), (0.5, ))])
    test_dataset = MnistDatasetTest('data', 'test', transforms)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=64,
                                 shuffle=False,
                                 num_workers=4)
    result = open("result.txt", "w")  # output file for the predictions
    with torch.no_grad():
        for images, image_name in test_dataloader:
            images = images.to(device)
            prediction = model(images, layer, func)
            for i in range(images.size()[0]):
                x = image_name[i] + ' ' + str(int(torch.argmax(prediction[i])))
                result.write(x)
                result.write("\n")
    result.close()
Example 25
def train(tub_names, model_path, batch_size, epochs):
    model_path = os.path.expanduser(model_path)
    m = MyModel()
    model = m.model
    model.summary()
    X, y = tubs_to_arrays(tub_names, seed=10)

    total_records = len(X)
    total_train = int(total_records * .8)
    total_val = total_records - total_train
    steps_per_epoch = ((total_train // batch_size) + 1) * 2
    validation_steps = (total_val // batch_size) + 1

    print('Train images: %d, Validation images: %d' % (total_train, total_val))
    print('Batch size:', batch_size)
    print('Epochs:', epochs)
    print('Training steps:', steps_per_epoch)
    print('Validation steps:', validation_steps)

    input("Press Enter to continue...")

    train_gen = generator(X[:total_train],
                          y[:total_train],
                          batch_size,
                          train=True,
                          categorical_angle=m.categorical_angle,
                          categorical_throttle=m.categorical_throttle)
    val_gen = generator(X[total_train:],
                        y[total_train:],
                        batch_size,
                        train=False,
                        categorical_angle=m.categorical_angle,
                        categorical_throttle=m.categorical_throttle)

    save_best = ModelCheckpoint(model_path,
                                monitor='val_loss',
                                verbose=0,
                                save_best_only=True,
                                mode='min')

    callbacks = [save_best, CSVLogger("logs/train.log"), OutputCallback()]

    hist = model.fit_generator(train_gen,
                               steps_per_epoch=steps_per_epoch,
                               epochs=epochs,
                               verbose=0,
                               validation_data=val_gen,
                               callbacks=callbacks,
                               validation_steps=validation_steps,
                               workers=4,
                               use_multiprocessing=True)
    return hist
Example 26
def test_model(ds_name, encoder, paths, categorical=False):
    """The main function for executing network testing. It loads the specified
       dataset iterator and optimized saliency model. By default, when no model
       checkpoint is found locally, the pretrained weights will be downloaded.

    Args:
        ds_name (str): Denotes the dataset that was used during training.
        encoder (str): The name of the encoder to use for prediction.
        paths (dict of str): A dictionary with all path elements.
        categorical (bool): Forwarded to the dataset loader.
    """

    w_filename_template = "/%s_%s_%s_weights.h5"  # [encoder]_[ds_name]_[loss_fn_name]_weights.h5

    (test_ds, n_test) = data.load_test_dataset(ds_name, paths["data"], categorical)
    
    print(">> Preparing model with encoder %s..." % encoder)

    model = MyModel(encoder, ds_name, "test")

    weights_path = paths["weights"] + w_filename_template % (encoder, ds_name, loss_fn_name)
    if not os.path.exists(weights_path):
        download.download_pretrained_weights(paths["weights"], encoder,
                                             ds_name, loss_fn_name)
    print(">> Loading weights...\n   %s" % weights_path)
    model.load_weights(weights_path)

    print(">> Start predicting using model trained on %s..." % ds_name.upper())
    results_path = paths["results"] + "%s/%s/%s/" % (ds_name, encoder, loss_fn_name)

    # Preparing progbar
    test_progbar = Progbar(n_test)
    for test_images, test_ori_sizes, test_filenames in test_ds:
        preds = test_step(test_images, model)
        for pred, filename, ori_size in zip(preds, test_filenames.numpy(),
                                            test_ori_sizes):
            img = data.postprocess_saliency_map(pred, ori_size, as_image=True)
            tf.io.write_file(results_path + filename.decode("utf-8"), img)
        test_progbar.add(test_images.shape[0])
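The paths argument is only ever read by key; the dict it implies looks like this (the directory values and the dataset/encoder names are hypothetical):

paths = {
    "data": "data/",        # dataset location
    "weights": "weights/",  # where the *_weights.h5 files live or get downloaded
    "results": "results/",  # where predicted saliency maps are written
}
test_model("salicon", "vgg16", paths)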
Example 27
    def predict(self,
                data,
                test_path='./data/test.csv',
                checkpoint_save_path='./data/saved_model.h5'):
        if self.model == 'mlp':
            # MLP hyperparameters
            batch_size = 128
            epochs = 1000
            init_lr = 5e-4
            '''
            Parameter extraction (kept for reference):
            file = open('./data/pre_weights.txt', 'w')
            for v in model.trainable_weights:
                file.write(str(v.name) + '\n')
                file.write(str(v.shape) + '\n')
                file.write(str(v.numpy()) + '\n')
            file.close()
            '''
            to_pre = data[891:].copy()
            to_pre.drop(['Survived'], axis=1, inplace=True)

            if reTrain:
                trainer = Trainer(data,
                                  batch_size=batch_size,
                                  epochs=epochs,
                                  init_lr=init_lr)
                model = trainer.train()
            else:
                model = MyModel.my_mlp()
                model.load_weights(checkpoint_save_path)
            to_pre = tf.convert_to_tensor(to_pre.values, dtype=tf.float64)
            pred_labels = model.predict(to_pre)
            pred_labels = tf.argmax(pred_labels, axis=1)

        elif self.model == 'bayes':
            pred_labels = np.array(MyModel.my_bayes(data))

        elif self.model == 'ranforest':
            pred_labels = np.array(MyModel.my_ranforest(data), dtype=int)  # np.int was removed in NumPy 1.24

        elif self.model == 'voting':
            bayes_pred = np.array(MyModel.my_bayes(data))
            ranforest_pred = np.array(MyModel.my_ranforest(data), dtype=int)
            model = MyModel.my_mlp()
            model.load_weights(checkpoint_save_path)
            to_pre = data[891:].copy()
            to_pre.drop(['Survived'], axis=1, inplace=True)
            to_pre = tf.convert_to_tensor(to_pre.values, dtype=tf.float64)
            pred_labels = model.predict(to_pre)
            mlp_pred = tf.argmax(pred_labels, axis=1)
            result = [bayes_pred, ranforest_pred, mlp_pred]
            pred_labels = stats.mode(np.array(result))[0][0]
        # Reformat the result into the submission layout Kaggle accepts
        test = pd.read_csv(test_path)
        test.drop(test.columns[1:], axis=1, inplace=True)
        test.insert(1, 'Survived', pred_labels)

        return test
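The voting branch takes a column-wise majority over the three classifiers with scipy.stats.mode; a quick illustration of the indexing used above (assuming SciPy's historical keepdims behaviour, which the original code relies on):

import numpy as np
from scipy import stats

votes = np.array([[0, 1, 1, 0],   # bayes
                  [1, 1, 0, 0],   # random forest
                  [1, 1, 1, 0]])  # mlp
print(stats.mode(votes)[0][0])    # -> [1 1 1 0], the per-column majority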
Example 28
def main():
    audio_path = 'wav_files\\long_dr1_7.wav'
    # NOTE: the input frames do not overlap
    inputs = Utils.audio_to_model_inputs(audio_path,
                                         model_config.input_vec_size)
    inputs_transposed = Utils.convert_to_cnn_inputs(inputs)
    input_len = len(inputs_transposed[0])
    num_of_features = len(inputs_transposed[0][0])
    num_of_labels = PhonesSet.get_num_labels()

    model = MyModel(input_len=input_len,
                    num_features=num_of_features,
                    num_labels=num_of_labels)
    model_file = model_config.model_file_name
    model.build_model("CONV1", [], [], weights_path=model_file)

    thread = threading.Thread(target=send_visemes_to_smartbody,
                              args=(model, inputs_transposed))
    thread.daemon = True  # daemon thread: don't block interpreter exit
    thread.start()

    loader_audio_path = 'wav_files\\long_dr1_7_loader.wav'
    play_wav(loader_audio_path)
Example 29
    def initialize_data(self, features, adj_train, gamma, eta, beta, l,
                        n_clusters, lr, clustering_labels, epochs):
        self.number_of_features, self.epochs = features.shape[1], epochs
        self.gamma, self.eta, self.beta = gamma, eta, beta

        self.clustering_labels = clustering_labels

        # normalize the adj
        adj_train_norm = compute_adj_norm(adj_train)
        S = getS(adj_train, features, l)
        F = getF(S)
        Y = getY(adj_train, beta, features)

        # this method needs to have the complete adj_train (not only the triu)
        D = getD(adj_train)

        # convert matrices to tensors
        self.adj_train_norm_tensor = convert_sparse_matrix_to_sparse_tensor(
            adj_train_norm)
        self.Y_tensor = tf.convert_to_tensor(Y, dtype="float32")
        self.D_tensor = tf.convert_to_tensor(D, dtype="float32")

        self.F_tensor = tf.convert_to_tensor(F, dtype="float32")
        self.S_tensor = tf.convert_to_tensor(S, dtype="float32")
        self.feature_tensor = convert_sparse_matrix_to_sparse_tensor(features)

        # define the ground truth
        self.y_actual = [adj_train.toarray().flatten(), features.toarray()]

        # define the optimizer
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=lr)

        # define the model
        self.model = MyModel(self.Y_tensor, n_clusters, self.D_tensor,
                             self.adj_train_norm_tensor,
                             self.number_of_features)
Example 30
def run():
    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')

    dataset = [
        {"in" : [0.0, 0.0], "out" : [0.0]},
        {"in" : [0.0, 1.0], "out" : [1.0]},
        {"in" : [1.0, 0.0], "out" : [1.0]},
        {"in" : [1.0, 1.0], "out" : [0.0]}]

    data_loader = get_data_loader(dataset=dataset, shuffle=True)

    model = MyModel()
    model.to(device)

    optimizer = optim.Adam(model.parameters())
    loss_func = nn.MSELoss()

    model.train()

    for _ in range(0, 2000):
        for target, inputs in data_loader:
            inputs = inputs.to(device)
            target = target.to(device)

            result = model(inputs)
            loss = loss_func(result, target)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    test_data_loader = get_data_loader(dataset=dataset, shuffle=False)
    model.eval()
    with torch.no_grad():
        for _, inputs in test_data_loader:
            print(inputs)
            inputs = inputs.to(device)
            result = model(inputs)
            print(result)
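This example leans on a get_data_loader helper and a MyModel network that are not shown; a minimal sketch, assuming the loader yields (target, input) pairs in the order the loop above unpacks them:

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset

class XorDataset(Dataset):
    # Wraps the list-of-dicts records used above.
    def __init__(self, records):
        self.records = records

    def __len__(self):
        return len(self.records)

    def __getitem__(self, i):
        r = self.records[i]
        return torch.tensor(r["out"]), torch.tensor(r["in"])

def get_data_loader(dataset, shuffle):
    return DataLoader(XorDataset(dataset), batch_size=4, shuffle=shuffle)

class MyModel(nn.Module):
    # A two-layer perceptron, the minimum needed to learn XOR.
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(2, 8), nn.Tanh(), nn.Linear(8, 1))

    def forward(self, x):
        return self.net(x)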