Example #2
import time

import numpy as np
from tqdm import tqdm

# SiameseNetwork, ImageDataset, DataGenerator, compute_accuracy and
# show_output come from the surrounding project.
def benchmark(config):
    model = SiameseNetwork('inference', config['model'])

    params = {
        'batch_size': config['benchmark']['batch_size'],
        'shuffle': False,
        'dim': config['model']['input_shape']
    }
    dataset_path = config['benchmark']['dataset_path']
    train_dataset = ImageDataset(dataset_path, 'train')
    train_dataset.prepare(config['benchmark']['test_cases'] // 2)
    train_generator = DataGenerator(train_dataset, **params)
    test_dataset = ImageDataset(dataset_path, 'validation')
    test_dataset.prepare(config['benchmark']['test_cases'] // 2)
    test_generator = DataGenerator(test_dataset, **params)

    preds = np.array([])
    gts = np.array([])
    for i in tqdm(range(len(train_generator))):
        batch = train_generator[i]
        pred = model.predict(batch[0])
        preds = np.append(preds, pred.flatten())
        gts = np.append(gts, batch[1])
    tr_acc = compute_accuracy(preds, gts)
    print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))

    evaluation_times = []
    preds = np.array([])
    gts = np.array([])
    for i in tqdm(range(len(test_generator))):
        batch = test_generator[i]
        start_t = time.time()
        pred = model.predict(batch[0])

        # batch is an (inputs, labels) pair, so normalise by the number of
        # samples, not len(batch) (which is always 2).
        evaluation_times.append((time.time() - start_t) / len(batch[1]))

        preds = np.append(preds, pred.flatten())
        gts = np.append(gts, batch[1])
        vis_every = max(1, config['benchmark']['test_cases'] //
                        (5 * config['benchmark']['batch_size']))
        if config['benchmark']['vis_output'] and i % vis_every == 0:
            show_output(batch[0][0], batch[0][1], pred, batch[1])
    te_acc = compute_accuracy(preds, gts)

    print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))
    print("Average Evaluation Time Per Sample: " +
          str(np.mean(evaluation_times)))

    print(preds)
    print(gts)
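Example #2 calls a compute_accuracy helper that is not shown (Example #16 imports one from the siamese module, with the arguments in (labels, predictions) order). A minimal sketch, assuming the model emits sigmoid scores in [0, 1] and the labels are 0/1; check the real signature before reusing it:

import numpy as np

def compute_accuracy(preds, gts, threshold=0.5):
    # Hypothetical helper, not from the source: fraction of pairs whose
    # thresholded score matches the binary ground-truth label.
    preds = np.asarray(preds).ravel()
    gts = np.asarray(gts).ravel()
    return float(np.mean((preds > threshold).astype(int) == gts.astype(int)))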
Example #3
# Requires torch, torch.optim as optim and torch.utils.data.DataLoader;
# EycDataset, SiameseNetwork, TripletLoss, test, train_batch_size and
# epoch_num come from the surrounding project.
def main():
    dataset = EycDataset(train=True)
    net = SiameseNetwork().cuda()
    # net = torch.load('models/model_triplet_pr_po_max_pool_fix.pt')
    print("model loaded")

    train_dataloader = DataLoader(dataset,
                                  shuffle=True,
                                  num_workers=8,
                                  batch_size=train_batch_size)
    # criterion = TGLoss()
    criterion = TripletLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0005)

    for epoch in range(0, epoch_num):
        if epoch % 10 == 0:
            test.test(True)
            test.test(False)

        for i, data in enumerate(train_dataloader):

            # print(data)
            (anchor, positive, negative) = data

            # Variable is deprecated since PyTorch 0.4; tensors work directly.
            anchor, positive, negative = anchor.cuda(), positive.cuda(), negative.cuda()
            (anchor_output, positive_output,
             negative_output) = net(anchor, positive, negative)

            optimizer.zero_grad()
            # loss = criterion(anchor_output, positive_output, negative_output, train_batch_size)
            loss = criterion(anchor_output, positive_output, negative_output)
            loss.backward()
            optimizer.step()

            if i % 10 == 0:
                # loss.data[0] is pre-0.4 PyTorch; use loss.item() instead.
                print("Epoch number {}\n Current loss {}\n".format(
                    epoch, loss.item()))

        print("Saving model")
        torch.save(net, 'models/model_triplet_pr_po_max_pool_fix_weighted.pt')
        print("-- Model Checkpoint saved ---")
Example #4
def train_model():
    def create_base_model():
        conv_base = ResNet50(include_top=False,
                             weights='imagenet',
                             input_shape=(224, 224, 3))

        #conv_base.trainable = False
        x = conv_base.output
        x = tf.keras.layers.Dropout(0.5)(x)
        embedding = GlobalAveragePooling2D()(x)
        embedding = Dense(128)(embedding)
        return Model(conv_base.input, embedding)

    def SiameseNetwork(base_model):
        """
        Create the siamese model structure using the supplied base and head model.
        """
        input_a = Input(shape=(224, 224, 3), name="image1")
        input_b = Input(shape=(224, 224, 3), name="image2")

        processed_a = base_model(input_a)
        processed_b = base_model(input_b)

        head = Concatenate()([processed_a, processed_b])
        head = Dense(1)(head)
        head = Activation(activation='sigmoid')(head)
        return Model([input_a, input_b], head)

    train_ds, val_ds, test_ds, test_labels = generator_fsl.create_generators()

    base_model = create_base_model()
    siamese_network = SiameseNetwork(base_model)

    #siamese_network.save("test.h5")
    lr_schedule = tfa.optimizers.ExponentialCyclicalLearningRate(
        initial_learning_rate=1e-8,
        maximal_learning_rate=1e-6,
        step_size=240,
    )
    # Pass the cyclical schedule to the optimizer; the original snippet built
    # it but then used a constant learning rate.
    opt = Adam(learning_rate=lr_schedule)

    siamese_network.compile(optimizer=opt,
                            loss='binary_crossentropy',
                            metrics=['accuracy', 'RootMeanSquaredError'])

    history = siamese_network.fit(train_ds,
                                  epochs=100,
                                  steps_per_epoch=50,
                                  validation_data=val_ds,
                                  validation_steps=20)

    # predict_classes only exists on Sequential models; threshold the sigmoid
    # output of the functional model instead.
    prediction = (siamese_network.predict(test_ds) > 0.5).astype('int32')
    evaluate = siamese_network.evaluate(test_ds, steps=32)

    return history, evaluate, prediction, test_labels
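generator_fsl.create_generators() is project code that is not shown. A hypothetical sketch of what it might yield: batches of ({'image1': a, 'image2': b}, label) pairs with label 1 for same-class pairs, matching the Input names used in SiameseNetwork above (the sampling logic is an illustration, not the project's implementation):

import numpy as np
import tensorflow as tf

def make_pair_dataset(images, labels, batch_size=32):
    rng = np.random.default_rng(0)

    def gen():
        while True:
            i = rng.integers(len(images))
            same = rng.random() < 0.5
            # indices of images whose class does (or does not) match image i
            candidates = np.where((labels == labels[i]) == same)[0]
            j = rng.choice(candidates)
            yield {'image1': images[i], 'image2': images[j]}, np.float32(same)

    signature = ({'image1': tf.TensorSpec((224, 224, 3), tf.float32),
                  'image2': tf.TensorSpec((224, 224, 3), tf.float32)},
                 tf.TensorSpec((), tf.float32))
    return tf.data.Dataset.from_generator(
        gen, output_signature=signature).batch(batch_size)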
Example #5
def main():
    print("Extract data")
    unzip_data()

    print("Split on train and test")
    split_on_train_and_test()

    print("Create datasets")
    train_ds, test_ds = prepare_datasets()

    print("Create data loaders")
    train_sampler = SiameseSampler(train_ds, random_state=RS)
    test_sampler = SiameseSampler(test_ds, random_state=RS)
    train_data_loader = DataLoader(train_ds,
                                   batch_size=BATCH_SIZE,
                                   sampler=train_sampler,
                                   num_workers=4)
    test_data_loader = DataLoader(test_ds,
                                  batch_size=BATCH_SIZE,
                                  sampler=test_sampler,
                                  num_workers=4)

    print("Build computational graph")
    mobilenet = mobilenet_v2(pretrained=True)
    # remove last layer
    mobilenet = torch.nn.Sequential(*(list(mobilenet.children())[:-1]))
    siams = SiameseNetwork(twin_net=TransferTwinNetwork(
        base_model=mobilenet, output_dim=EMBEDDING_DIM))
    siams.to(DEVICE)
    criterion = nn.BCEWithLogitsLoss()
    optimizer = torch.optim.Adam(siams.parameters(), lr=LR)

    print("Train model")
    siams = train(siams, criterion, optimizer, train_data_loader,
                  test_data_loader)

    print("Save model")
    torch.save(siams.twin_net.state_dict(), 'models/twin.pt')
Example #6
    parser.add_argument(
        '-c',
        '--checkpoint',
        type=str,
        help="Path of model checkpoint to be used for inference.",
        required=True
    )
    parser.add_argument(
        '-o',
        '--out_path',
        type=str,
        help="Path for saving tensorrt model.",
        required=True
    )

    args = parser.parse_args()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    
    checkpoint = torch.load(args.checkpoint)
    model = SiameseNetwork(backbone=checkpoint['backbone'])
    model.to(device)
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()

    # The model takes two image tensors, so name each ONNX input separately.
    dummy_inputs = (torch.rand(1, 3, 224, 224).to(device),
                    torch.rand(1, 3, 224, 224).to(device))
    torch.onnx.export(model, dummy_inputs, args.out_path,
                      input_names=['input1', 'input2'],
                      output_names=['output'], export_params=True)
    
    onnx_model = onnx.load(args.out_path)
    onnx.checker.check_model(onnx_model)
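To sanity-check the exported graph beyond onnx.checker, it can be run with onnxruntime (an optional extra step, assuming the onnxruntime package is installed; the 1x3x224x224 shapes mirror the dummy tensors passed to torch.onnx.export above):

import numpy as np
import onnxruntime as ort

session = ort.InferenceSession(args.out_path)
feed = {inp.name: np.random.rand(1, 3, 224, 224).astype(np.float32)
        for inp in session.get_inputs()}
print(session.run(None, feed))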
Example #7
class Perception(object):
    def __init__(self):
        # input rgbd image in numpy array format [w h c]
        self.sdmrcnn_model = get_model_instance_segmentation(2).to(
            device, dtype=torch.float)
        self.sdmrcnn_model.load_state_dict(torch.load(os.path.join('19.pth')))
        self.sdmrcnn_model.eval()
        self.siamese_model = SiameseNetwork().cuda()
        self.siamese_model.load_state_dict(torch.load('siamese.pt'))
        self.siamese_model.eval()

    def segmentation(self, raw_rgb, raw_depth):
        # reverse the channel order (BGR -> RGB)
        rgb_raw_img = raw_rgb[:, :, ::-1].copy()

        # np.float was removed in NumPy 1.24; use np.float64 explicitly.
        color_img = rgb_raw_img.astype(np.float64) / 255.
        img_mean = [0.485, 0.456, 0.406]
        img_std = [0.229, 0.224, 0.225]
        for c in range(color_img.shape[2]):
            color_img[:, :, c] = (color_img[:, :, c] - img_mean[c]) / img_std[c]

        depth_img = raw_depth.astype(np.float64)[:, :, np.newaxis]
        # depth_img = depth_img / np.amax(depth_img)
        img = np.concatenate((color_img[:, :, 0:2], depth_img), axis=2)

        test_input = [
            torch.from_numpy(np.transpose(img, [2, 0, 1])).to(device, dtype=torch.float)
        ]

        output = self.sdmrcnn_model(test_input)
        mask_list = output[0]['masks'].cpu().detach().numpy()
        masks = np.reshape(mask_list, (len(mask_list), 480, 640))
        # masks = []
        # for mask in mask_list:
        #     mask = mask.reshape(960, 1280)
        #     mask = mask[240:720, 320:960]
        #     masks.append(mask)
        return masks

    def classification(self, masks, raw_rgb, anchor_img):
        scores = []
        # Assumes anchor_img is already a CHW 3x120x120 array: np.reshape only
        # reinterprets the buffer, it does not transpose HWC data.
        img0 = torch.from_numpy(np.reshape(anchor_img, (1, 3, 120, 120)))
        masks = np.reshape(masks, (len(masks), 480, 640))
        print('Number of objects detected: %d' % len(masks))
        for mask in masks:
            color_img = np.copy(raw_rgb)
            color_img[np.where(mask < 0.5)] = 0
            color_img = pad(color_img)
            color_img = center_crop_150(color_img)

            img1 = np.reshape(color_img, (1, 3, 120, 120))

            img1 = torch.from_numpy(img1)
            img0, img1 = img0.float().cuda(), img1.float().cuda()
            output = self.siamese_model(img0, img1)
            output = output.detach().cpu().numpy()
            scores.append(output[0])
        scores = np.asarray(scores)
        res_mask = masks[np.argmax(scores)]
        res_mask = np.reshape(res_mask, (480, 640))
        return res_mask
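A minimal usage sketch for the class above (the zero arrays are stand-ins for real sensor data; shapes follow what segmentation and classification expect):

import numpy as np

raw_rgb = np.zeros((480, 640, 3), np.uint8)       # stand-in color frame
raw_depth = np.zeros((480, 640), np.float32)      # stand-in depth map
anchor_img = np.zeros((3, 120, 120), np.float32)  # stand-in anchor crop (CHW)

perception = Perception()
masks = perception.segmentation(raw_rgb, raw_depth)                # (N, 480, 640)
best_mask = perception.classification(masks, raw_rgb, anchor_img)  # (480, 640)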
Example #8
import argparse
from autolab_core import YamlConfig

from siamese import SiameseNetwork

if __name__ == "__main__":

    # parse the provided configuration file, set tf settings, and train
    conf_parser = argparse.ArgumentParser(description="Train Siamese model")
    conf_parser.add_argument("--config", action="store", default="cfg/train.yaml",
                               dest="conf_file", type=str, help="path to the configuration file")
    conf_args = conf_parser.parse_args()

    # read in config file information from proper section
    config = YamlConfig(conf_args.conf_file)
    model_config = config['model']
    train_config = config['train']

    model = SiameseNetwork('training', model_config)
    model.train(train_config)
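The parsed YamlConfig behaves like nested dictionaries with a model and a train section; a hypothetical minimal equivalent in Python (the concrete keys are assumptions, apart from input_shape, which Example #2 also reads from the model section):

config = {
    'model': {'input_shape': (120, 120)},       # assumed keys
    'train': {'batch_size': 32, 'epochs': 20},  # assumed keys
}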
Example #9
        default=25)

    args = parser.parse_args()

    os.makedirs(args.out_path, exist_ok=True)

    # Set device to CUDA if a CUDA device is available, else CPU
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    train_dataset = Dataset(args.train_path, shuffle_pairs=True, augment=True)
    val_dataset = Dataset(args.val_path, shuffle_pairs=False, augment=False)

    train_dataloader = DataLoader(train_dataset, batch_size=8, drop_last=True)
    val_dataloader = DataLoader(val_dataset, batch_size=8)

    model = SiameseNetwork(backbone=args.backbone)
    model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
    criterion = torch.nn.BCELoss()

    writer = SummaryWriter(os.path.join(args.out_path, "summary"))

    best_val = float('inf')  # lowest validation loss seen so far

    for epoch in range(args.epochs):
        print("[{} / {}]".format(epoch, args.epochs))
        model.train()

        losses = []
        correct = 0
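The snippet is truncated inside the epoch loop. A sketch of how such a loop typically continues, assuming the dataloader yields (img1, img2, label) batches and that the model returns a sigmoid probability (both assumptions, since the Dataset class is not shown):

        # Hypothetical continuation of the truncated loop above.
        for img1, img2, label in train_dataloader:
            img1, img2, label = img1.to(device), img2.to(device), label.to(device)
            prob = model(img1, img2).squeeze(1)
            loss = criterion(prob, label.float())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            losses.append(loss.item())
            correct += ((prob > 0.5) == label.bool()).sum().item()
        writer.add_scalar("train_loss", sum(losses) / max(1, len(losses)), epoch)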
Example #10
data_transform = transforms.Compose([
    transforms.ToTensor(),
    normalize,
])

train_loader = CameraDataset(pivot_images,
                             positive_images,
                             batch_size,
                             num_batch,
                             data_transform,
                             is_train=True)
print('Randomly paired data are generated.')

# 2: load network
branch = BranchNetwork()
net = SiameseNetwork(branch)

criterion = ContrastiveLoss(margin=1.0)

optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()),
                       lr=learning_rate,
                       weight_decay=0.000001)

# 3: optionally resume from a checkpoint
if resume:
    if os.path.isfile(resume):
        checkpoint = torch.load(resume,
                                map_location=lambda storage, loc: storage)
        net.load_state_dict(checkpoint['state_dict'])
        print('resume from {}.'.format(resume))
    else:
Example #11
from siamese import SiameseNetwork

# Construct Siamese network
model, base_network = SiameseNetwork()
model.load_weights('/book/working/models/siamese.h5')


def intermediate(embs):
    return base_network.predict(embs)
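A quick usage sketch: intermediate maps stored embeddings through the trained base network, after which pair similarity can be scored with an ordinary distance (illustrative code; the 128-dim random inputs are an assumption, not from the source):

import numpy as np

vec_a, vec_b = np.random.rand(2, 128).astype(np.float32)  # stand-in inputs
emb_a, emb_b = intermediate(np.stack([vec_a, vec_b]))
print(np.linalg.norm(emb_a - emb_b))  # smaller distance = more similar pair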
Example #12
def test_siamese():
    """
    Test that all components the siamese network work correctly by executing a
    training run against generated data.
    """

    num_classes = 5
    input_shape = (3, )
    epochs = 1000

    # Generate some data
    x_train = np.random.rand(100, 3)
    y_train = np.random.randint(num_classes, size=100)

    x_test = np.random.rand(30, 3)
    y_test = np.random.randint(num_classes, size=30)

    # Define base and head model
    def create_base_model(input_shape):
        model_input = Input(shape=input_shape)

        embedding = Dense(4)(model_input)
        embedding = BatchNormalization()(embedding)
        embedding = Activation(activation='relu')(embedding)

        return Model(model_input, embedding)

    def create_head_model(embedding_shape):
        embedding_a = Input(shape=embedding_shape)
        embedding_b = Input(shape=embedding_shape)

        head = Concatenate()([embedding_a, embedding_b])
        head = Dense(4)(head)
        head = BatchNormalization()(head)
        head = Activation(activation='sigmoid')(head)

        head = Dense(1)(head)
        head = BatchNormalization()(head)
        head = Activation(activation='sigmoid')(head)

        return Model([embedding_a, embedding_b], head)

    # Create siamese neural network
    base_model = create_base_model(input_shape)
    head_model = create_head_model(base_model.output_shape)
    siamese_network = SiameseNetwork(base_model, head_model)

    # Prepare siamese network for training
    siamese_network.compile(loss='binary_crossentropy',
                            optimizer=keras.optimizers.Adam())

    # Evaluate network before training to establish a baseline
    score_before = siamese_network.evaluate_generator(x_train,
                                                      y_train,
                                                      batch_size=64)

    # Train network
    siamese_network.fit(x_train,
                        y_train,
                        validation_data=(x_test, y_test),
                        batch_size=64,
                        epochs=epochs)

    # Evaluate network
    score_after = siamese_network.evaluate(x_train, y_train, batch_size=64)

    # Ensure that the training loss score improved as a result of the training
    assert (score_before > score_after)
Example #13
                name = 'Unknown'

            cv2.rectangle(frame, (start_x, start_y), (end_x, end_y),
                          (0, 0, 255), 2)
            cv2.putText(frame, f"{name}:{prob * 100: .2f}%",
                        (start_x, start_y), font, 0.5, (255, 0, 0), 1)

        cv2.imshow("Camera", frame)

        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break

    cv2.destroyAllWindows()
    cap.release()


if __name__ == '__main__':
    best_model_path = './siamese/models/triplet/resnet101.pt'
    predictor_path = './siamese/models/predictor-linear.pt'

    predictor = LinearPredictor().cuda().half()
    predictor.load(predictor_path)
    model = SiameseNetwork(base='resnet101') \
        .load(best_model_path) \
        .cuda() \
        .half() \
        .initialize(predictor, cuda=True, half=True)

    web_cam(model)
Example #14
    return predictions.round().eq(true_labels).sum().item()


if __name__ == '__main__':
    lr = 0.001
    batch_size = 32
    epochs = 100
    start_epoch = 0

    train_data = FacesDataset(train=True, validation=False, base='resnet101')
    train_loader = DataLoader(train_data, batch_size, False)

    test = FacesDataset(train=False, validation=False, base='resnet101')
    test_loader = DataLoader(test, batch_size, False)

    siamese_network = SiameseNetwork(base='resnet101').cuda()
    siamese_network.load('./models/triplet/resnet101.pt')
    siamese_network.eval()

    predictor_ = LinearPredictor().cuda()

    sys.stdout.write('Training Linear predictor:\n')
    losses, test_losses, train_accuracies_, test_accuracies_ = train(
        siamese_network,
        predictor_,
        train_loader,
        test_loader,
        epochs,
        lr,
        'predictor-linear',
        start_epoch_=start_epoch,
Example #15
n, c, h, w = pivot_images.shape
assert (h, w) == (180, 320)

print('Note: assume input image resolution is 180 x 320 (h x w)')
data_loader = CameraDataset(pivot_images,
                            positive_images,
                            batch_size,
                            -1,
                            data_transform,
                            is_train=False)
print('load {} batch edge images'.format(len(data_loader)))

# 2: load network
branch = BranchNetwork()
net = SiameseNetwork(branch)

if os.path.isfile(model_name):
    checkpoint = torch.load(model_name, map_location=lambda storage, loc: storage)
    net.load_state_dict(checkpoint['state_dict'])
    print('load model file from {}.'.format(model_name))
else:
    print('Error: file not found at {}'.format(model_name))
    sys.exit()

# 3: setup computation device
device = 'cpu'
if torch.cuda.is_available():
    device = torch.device('cuda:{}'.format(cuda_id))
    net = net.to(device)
    cudnn.benchmark = True
Example #16
import numpy as np
from siamese import SiameseNetwork, create_pairs, compute_accuracy

# Set constants
BATCH_SIZE = 128
N_EPOCHS = 20
CLASS_DIM = 120

# Construct input data
with open('/book/working/data/inter_emb.np', 'rb') as f:
    X_train = np.array(np.load(f), dtype=np.float32)
with open('/book/working/data/labels.np', 'rb') as f:
    y_train = np.array(np.load(f), dtype=np.int8)
    digit_indices = [np.where(y_train == i)[0] for i in range(CLASS_DIM)]
tr_pairs, tr_y = create_pairs(X_train, digit_indices, CLASS_DIM)

# Construct Siamese network
model, base_network = SiameseNetwork()
model.fit([tr_pairs[:, 0], tr_pairs[:, 1]],
          tr_y,
          batch_size=BATCH_SIZE,
          epochs=N_EPOCHS)

# Compute final accuracy on training set
y_pred = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])
tr_acc = compute_accuracy(tr_y, y_pred)
print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))

# Save
model.save('/book/working/models/siamese.h5')
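The siamese.h5 written here is the same checkpoint that Example #11 reloads with model.load_weights.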
Example #17
if __name__ == '__main__':
    lr = 0.001
    batch_size = 128
    epochs = 30
    start_epoch = 0

    bases = ['inception', 'resnet', 'densenet', 'mobilenet']
    for base in bases:
        train_data = FacesDataset(train=True, validation=False, base=base)
        train_loader = DataLoader(train_data, batch_size, False)

        test = FacesDataset(train=False, validation=False, base=base)
        test_loader = DataLoader(test, batch_size, False)

        siameseNetwork = SiameseNetwork(base=base).cuda()
        model_name_ = siameseNetwork.name

        print("Starting train of", base)
        losses, test_losses = train(siameseNetwork,
                                    train_loader,
                                    test_loader,
                                    epochs,
                                    lr,
                                    model_name_,
                                    start_epoch_=start_epoch,
                                    adam=False,
                                    patience=7)

        start_epoch = 0
Example #18
    head = Activation(activation='sigmoid')(head)

    head = Dense(1)(head)
    head = BatchNormalization()(head)
    head = Activation(activation='sigmoid')(head)

    return Model([embedding_a, embedding_b], head)


num_classes = 10
epochs = 999999

base_model = create_base_model(input_shape)
head_model = create_head_model(base_model.output_shape)

siamese_network = SiameseNetwork(base_model, head_model)
siamese_network.compile(loss='binary_crossentropy',
                        optimizer=keras.optimizers.Adam(),
                        metrics=['accuracy'])

siamese_checkpoint_path = "./siamese_checkpoint"

siamese_callbacks = [
    EarlyStopping(monitor='val_acc', patience=10, verbose=0),
    ModelCheckpoint(siamese_checkpoint_path,
                    monitor='val_acc',
                    save_best_only=True,
                    verbose=0)
]

siamese_network.fit(x_train,