Code example #1
import os
from datetime import datetime

import torch
from torch.utils.data import DataLoader

# SeqDataset, MyNet and evaluate come from the surrounding project
def main(resume, use_cuda=False, use_augment=False):

    ## path
    timestamp = datetime.now().strftime(r"%Y-%m-%d_%H-%M-%S")
    save_path = os.path.join('detection', timestamp)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
        print('make a test result folder: ', save_path)

    ## cuda or cpu
    if use_cuda:
        device = torch.device("cuda:0")
        print("using cuda")
    else:
        device = torch.device("cpu")
        print("using CPU")

    if use_augment:
        print("data are augmented randomly")

    ## dataloader
    test_path = './metadata/test_images.json'  # unused in this example
    new_test_path = './metadata/new_test_images.json'  # unused in this example
    detection_path = './metadata/detection_test_images.json'

    dataset = SeqDataset(phase='test',
                         do_augmentations=use_augment,
                         metafile_path=detection_path,
                         return_gt_label=False)

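    # batch_size=1 with shuffle=False: evaluate the test sequences one at a time, in order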
    data_loader = DataLoader(
        dataset,
        batch_size=1,
        num_workers=1,
        shuffle=False,
        pin_memory=True,
    )

    ## CNN model
    output_dim = 3
    model = MyNet(output_dim)
    ## resume a ckpt
    checkpoint = torch.load(resume, map_location=device)  # map_location lets GPU-saved checkpoints load on CPU
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    model = model.to(device)

    print(model)

    ## evaluate
    log = evaluate(model,
                   data_loader,
                   device,
                   draw_path=save_path,
                   use_conf=True)
Code example #2
def main(_):
    # Graph input (TF 1.x API; batch_size and the IMAGE_* / TAG_SIZE
    # constants are assumed to be defined at module level)
    images = tf.placeholder(
        tf.float32, [batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNEL])
    tags = tf.placeholder(tf.float32, [batch_size, TAG_SIZE])
    similarity = tf.placeholder(tf.float32, [batch_size])
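    # alpha/gamma are hyper-parameters for the pairwise loss that is commented out below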
    alpha = tf.placeholder(tf.float32, [1])
    gamma = tf.placeholder(tf.float32, [1])
    learning_rate = tf.placeholder(tf.float32)
    #momentum = tf.placeholder(tf.float32,[1])

    # Construct Text network
    Tag_v = multilayer_perceptron(tags)

    # Construct Image network
    net = MyNet({'data': images})

    image_fc7 = net.layers['fc7']

    Image_u = add_final_training_op(image_fc7)
    #
    #    Tag_v1,Tag_v2 = tf.split(0,2,Tag_v)
    #    Image_u1,Image_u2 = tf.split(0,2,Image_u)
    #    np_images1,np_tags1 = next(data_gen1)

    #    loss = _loss(Image_u1,Tag_v1,Image_u2,Tag_v2,similarity,alpha,gamma)
    #
    #    opt = tf.train.RMSPropOptimizer(learning_rate).minimize(loss)
    #
    #    # Add ops to save and restore all the variable.
    #    saver = tf.train.Saver()
    #
    #
    with tf.Session() as sess:
        # Merge all the summaries and write them out to /tmp/retrain_logs (by default)
        #        merged = tf.merge_all_summaries()
        #        train_writer = tf.train.SummaryWriter(summaries_dir + '/summary_train',sess.graph)
        # Load the data
        sess.run(tf.global_variables_initializer())  # initialize_all_variables() is deprecated
        print("load net")
        net.load('mynet.npy', sess)

        image_hd, tag_hd = readhdf5(Data_Path + cate1 + "/hdf5.h5")
        data_gen = gen_data_batch(image_hd, tag_hd)

        for i in range(MAX_ITERATION):  # xrange is Python 2 only
            np_images, np_tags = next(data_gen)
            feed = {images: np_images, tags: np_tags}
            sess.run([Tag_v, Image_u], feed_dict=feed)
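            # NOTE: with the loss/optimizer block commented out above, this loop only runs forward passes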
Code example #3
def main(resume, use_cuda=False, use_augment=False):

    ## path
    timestamp = datetime.now().strftime(r"%Y-%m-%d_%H-%M-%S")
    save_path = os.path.join('detection', timestamp)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
        print('make a test result folder: ', save_path)

    ## cuda or cpu
    if use_cuda:
        device = torch.device("cuda:0")
        print("using cuda")
    else:
        device = torch.device("cpu")
        print("using CPU")

    if use_augment:
        print("data are augmented randomly")

    ## dataloader
    data_loader = StreamingDataloader(imwidth=224)
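    # StreamingDataloader is assumed to be a project-local loader that preprocesses single images on demand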

    ## CNN model
    output_dim = 3
    model = MyNet(output_dim)
    ## resume a ckpt
    checkpoint = torch.load(resume, map_location=device)  # map_location lets GPU-saved checkpoints load on CPU
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    model = model.to(device)
    print(model)

    ## init a detector
    detector = Detector(model, data_loader, device)

    ## perform detection
    # feed image paths to the detector in real time
    data_folder = '/home/yanglei/codes/WSOL/detection/small_test'
    image_path = os.path.join(data_folder, 'new_test0000.png')
    detector(image_path, draw_path=save_path)
Code example #4
import os

import numpy as np
import torch
from torchvision import transforms

# MyNet is the project-local model class
def predict(img):
    """
    Load the model and run prediction.
    Main steps:
        1. Load the model (load the model you consider best)
        2. Preprocess the image
        3. Predict the image's class with the loaded model
    :param img: a PIL.Image object
    :return: string, the class the model predicts for the image,
            one of the 6 classes 'cardboard', 'glass', 'metal', 'paper', 'plastic', 'trash'
    """
    # Load the model. Note that model_path is a relative path, in the same
    # directory as this file. If your model is the dnn.h5 model under the
    # results folder, then model_path = 'results/dnn.h5'.
    model_path = "weights/mynet-64-8.16-0.81.pth"
    try:
        # Used by the grader at submission time; do not delete this part
        model_path = os.path.realpath(__file__).replace('main.py', model_path)
    except NameError:
        model_path = './' + model_path

    # -------------------------- model prediction --------------------------
    labels = {
        0: 'cardboard',
        1: 'glass',
        2: 'metal',
        3: 'paper',
        4: 'plastic',
        5: 'trash'
    }
    trans = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    img = trans(img)
    img = torch.unsqueeze(img, dim=0)
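    # add a batch dimension: [C, H, W] -> [1, C, H, W]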

    # Load the model
    net = MyNet().cpu()
    net.load_state_dict(
        torch.load(model_path, map_location=torch.device('cpu')))
    net.eval()
    with torch.no_grad():
        pred = net(img)
        pred = pred.numpy()
        pred_label = labels[np.argmax(pred)]  # avoid shadowing the function name `predict`

    # -------------------------------------------------------------------------

    # Return the image's class
    return pred_label
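
A minimal usage sketch (the file name 'test.jpg' is a placeholder, not part of the original project):

from PIL import Image

img = Image.open('test.jpg').convert('RGB')  # predict() expects a PIL.Image; RGB matches the 3-channel Normalize
print(predict(img))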
Code example #5
File: finetune_mnist.py  Project: xjustusc/mlwithtf
def gen_data_batch(source):
    data_gen = gen_data(source)
    while True:
        image_batch = []
        label_batch = []
        for _ in range(batch_size):
            image, label = next(data_gen)
            image_batch.append(image)
            label_batch.append(label)
        yield np.array(image_batch), np.array(label_batch)


images = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
net = MyNet({'data': images})

ip2 = net.layers['ip2']
pred = net.layers['prob']

loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=ip2, labels=labels), 0)
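# ip2 holds the pre-softmax logits; reduce_mean over axis 0 averages the per-example losses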
opt = tf.train.RMSPropOptimizer(0.001)
train_op = opt.minimize(loss)

with tf.Session() as sess:
    # Load the data
    sess.run(tf.global_variables_initializer())
    net.load('mynet.npy', sess)

    data_gen = gen_data_batch(mnist.train)  # mnist is assumed to be loaded elsewhere (e.g. the TF MNIST tutorial loader)
Code example #6
def _build_network(self):
    with tf.name_scope('AlexNet'):
        # remove [15:23] from the network
        mynet = MyNet({'data': self.x})
    return mynet
Code example #7
import json
import pickle

import numpy as np
import pandas as pd
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
from sklearn.preprocessing import OneHotEncoder

# MyNet and data_generator are project-local helpers
def main():
    TRAIN_IMG_DIR = './dlcv_final_2_dataset/train/'
    TRAIN_ID_DIR = './dlcv_final_2_dataset/train_id.txt'
    VAL_IMG_DIR = './dlcv_final_2_dataset/val/'
    VAL_ID_DIR = './dlcv_final_2_dataset/val_id.txt'
    MODEL_DIR = './model/mynet.hdf5'
    HISTORY_DIR = './history/history_mynet2.pickle'

    IMG_HEIGHT = 218
    IMG_WIDTH = 178
    BATCH_SIZE = 16
    INITIAL_EPOCH = 0
    EPOCHS = 100

    # Load the dictionary for id
    with open('../dictionary/dual_dict.txt', 'r') as f:
        dual_dict = json.load(f)

    with open('../dictionary/id_dict.txt', 'r') as f:
        id_dict = json.load(f)

    N_CLASSES = len(id_dict)
    enc = OneHotEncoder(n_values=N_CLASSES)
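    # NOTE: n_values was removed in scikit-learn 0.22; newer versions use OneHotEncoder(categories=[list(range(N_CLASSES))])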

    # Read train_id.txt
    id_df = pd.read_csv(TRAIN_ID_DIR, sep=' ', header=None)

    # Get training data and labels
    train_id = [id_dict[ele] for ele in id_df[1].tolist()]
    train_img_name = id_df[0].tolist()

    # Read val_id.txt
    id_df = pd.read_csv(VAL_ID_DIR, sep=' ', header=None)

    # Get validation data and labels
    val_id = [id_dict[ele] for ele in id_df[1].tolist()]
    val_img_name = id_df[0].tolist()

    model = MyNet(input_shape=(IMG_HEIGHT, IMG_WIDTH, 3), n_classes=N_CLASSES)
    model.compile(optimizer=Adam(lr=1e-4),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    checkpoint = ModelCheckpoint(MODEL_DIR,
                                 monitor='val_acc',
                                 save_best_only=True,
                                 verbose=1,
                                 save_weights_only=False)

    print('Start train')
    history = model.fit_generator(
        data_generator(TRAIN_IMG_DIR, BATCH_SIZE, IMG_HEIGHT, IMG_WIDTH,
                       train_img_name, train_id, enc, N_CLASSES, dual_dict),
        steps_per_epoch=int(np.ceil(len(train_id) / BATCH_SIZE)),
        epochs=EPOCHS,
        validation_data=data_generator(VAL_IMG_DIR, BATCH_SIZE, IMG_HEIGHT,
                                       IMG_WIDTH, val_img_name, val_id, enc,
                                       N_CLASSES, dual_dict),
        validation_steps=int(np.ceil(len(val_id) / BATCH_SIZE)),
        initial_epoch=INITIAL_EPOCH,
        callbacks=[checkpoint])

    with open(HISTORY_DIR, 'wb') as fout:  # write handle (was misleadingly named `fin`)
        pickle.dump(history.history, fout)
Code example #8
File: main.py  Project: xdr940/utils
import torch

from mynet import MyNet

x = torch.tensor([1, 2, 3], dtype=torch.float32, requires_grad=True)
y = torch.tensor([4, 5, 6], dtype=torch.float32, requires_grad=True)

net = MyNet()
z = net(x, y)
# backward() on a non-scalar (vector) output needs an explicit gradient argument
z.backward(torch.tensor([1, 1.2, 1], dtype=torch.float32))
#z[0].backward()
# debug
print('x: ', x)
print('y: ', y)
print('z: ', z)
print('x.grad: ', x.grad)
print('y.grad: ', y.grad)
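
The snippet depends on a project-local mynet.MyNet that is not shown here. A minimal hypothetical stand-in that makes the demo runnable (an assumption, not the real class):

import torch
import torch.nn as nn

class MyNet(nn.Module):
    # element-wise product keeps z a vector, so backward() needs a gradient argument
    def forward(self, x, y):
        return x * y

With this stand-in, z = [4., 10., 18.], and the backward call above gives x.grad = [4.0, 6.0, 6.0] (the passed gradient scaled element-wise by y) and y.grad = [1.0, 2.4, 3.0].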
Code example #9
import os
import shutil
from datetime import datetime

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm

# Dataset, MyNet, AverageMeter, SimpleLogger, evaluate and _save_checkpoint
# are project-local helpers
def main(config, resume):

    # parameters
    batch_size = config.get('batch_size', 32)
    start_epoch = config['epoch']['start']
    max_epoch = config['epoch']['max']
    lr = config.get('lr', 0.0005)
    use_conf = config.get('use_conf', False)
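    # use_conf: if True, each sample's loss is scaled by an entropy-based confidence weight (see the training loop)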

    ## path
    save_path = config['save_path']
    timestamp = datetime.now().strftime(r"%Y-%m-%d_%H-%M-%S")
    save_path = os.path.join(save_path, timestamp)

    result_path = os.path.join(save_path, 'result')
    if not os.path.exists(result_path):
        os.makedirs(result_path)

    model_path = os.path.join(save_path, 'model')
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    dest = shutil.copy('train.py', save_path)
    print("save to: ", dest)

    ## cuda or cpu
    if config['n_gpu'] == 0 or not torch.cuda.is_available():
        device = torch.device("cpu")
        print("using CPU")
    else:
        device = torch.device("cuda:0")

    ## dataloader
    dataset = Dataset(phase='train', do_augmentations=False)
    data_loader = DataLoader(
        dataset,
        batch_size=int(batch_size),
        num_workers=1,
        shuffle=True,
        drop_last=True,
        pin_memory=True,
        # **loader_kwargs,
    )

    val_dataset = Dataset(phase='val', do_augmentations=False)
    val_data_loader = DataLoader(
        val_dataset,
        batch_size=int(batch_size),
        num_workers=1,
        shuffle=True,
        drop_last=True,
        pin_memory=True,
        # **loader_kwargs,
    )

    ## few shot
    do_few_shot = True
    fs_data_loader = None  # stays None when few-shot training is disabled
    if do_few_shot:
        fs_dataset = Dataset(
            phase='train',
            do_augmentations=False,
            metafile_path='metadata/detection_train_images.json')
        fs_data_loader = DataLoader(
            fs_dataset,
            batch_size=int(128),
            num_workers=1,
            shuffle=True,
            pin_memory=True,
            # **loader_kwargs,
        )

    ## CNN model
    output_dim = 3
    model = MyNet(output_dim)
    model = model.to(device)
    model.train()
    print(model)

    ## loss
    criterion = nn.CrossEntropyLoss(reduction='none')

    ## optimizer
    params = list(filter(lambda p: p.requires_grad, model.parameters()))
    optim_params = {
        'lr': lr,
        'weight_decay': 0,
        'amsgrad': False,
    }
    optimizer = torch.optim.Adam(params, **optim_params)
    lr_params = {
        'milestones': [10],
        'gamma': 0.1,
    }
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, **lr_params)
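    # MultiStepLR: cut the learning rate by 10x after epoch 10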

    loss_avg = AverageMeter()
    acc_avg = AverageMeter()
    fs_loss_avg = AverageMeter()
    fs_acc_avg = AverageMeter()
    logger = SimpleLogger(['train_loss', 'train_acc', 'val_loss', 'val_acc'])

    ## loop
    for epoch in range(start_epoch, max_epoch):
        loss_avg.reset()
        acc_avg.reset()
        fs_loss_avg.reset()
        fs_acc_avg.reset()  # reset all meters so each epoch's stats are not polluted by earlier epochs

        for batch_idx, batch in tqdm(
                enumerate(data_loader),
                total=len(data_loader),
                ncols=80,
                desc=f'training epoch {epoch}',
        ):
            data = batch[0].to(device)
            gt_lbls = batch[1].to(device)
            gt_gt_lbls = batch[2].to(device)

            ## set zerograd
            optimizer.zero_grad()

            ## run forward pass
            out = model(data)  ## logits: [B, NC]; conf: [B, 1]
            preds = torch.max(out, dim=-1)[1]
            # print("out shape: ", out.shape)

            weights = model.compute_entropy_weight(out)
            # print("weights shape: ", weights.shape)

            ## compute loss
            class_loss = criterion(out, gt_lbls)  ## [B]
            # print("class_loss shape: ", class_loss.shape)

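            ## confidence weighting: scale each loss by weight^2 and penalise low weights with (1 - weight)^2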
            if use_conf:
                loss = (class_loss * (weights**2) + (1 - weights)**2).mean()
            else:
                loss = class_loss.mean()

            ## record
            loss_avg.update(loss.item(), batch_size)
            positive = ((gt_lbls == preds) | (gt_gt_lbls > 2)).sum()  # explicit logical OR
            batch_acc = positive.to(torch.float) / batch_size
            acc_avg.update(batch_acc.item(), batch_size)

            ## run backward pass
            loss.backward()
            optimizer.step()  ## update

        ## each epoch
        logger.update(loss_avg.avg, 'train_loss')
        logger.update(acc_avg.avg, 'train_acc')
        print("train loss: ", loss_avg.avg)
        print("train acc: ", acc_avg.avg)

        if do_few_shot and fs_data_loader is not None:
            for batch_idx, batch in tqdm(
                    enumerate(fs_data_loader),
                    total=len(fs_data_loader),
                    ncols=80,
                    desc=f'few-shot epoch {epoch}',
            ):
                data = batch[0].to(device)
                gt_lbls = batch[1].to(device)
                gt_gt_lbls = batch[2].to(device)

                ## set zerograd
                optimizer.zero_grad()

                ## run forward pass
                out = model(data)  ## logits: [B, NC]; conf: [B, 1]
                preds = torch.max(out, dim=-1)[1]
                # print("out shape: ", out.shape)

                weights = model.compute_entropy_weight(out)
                # print("weights shape: ", weights.shape)

                ## compute loss
                class_loss = criterion(out, gt_lbls)  ## [B]
                # print("class_loss shape: ", class_loss.shape)

                if use_conf:
                    loss = (class_loss * (weights**2) +
                            (1 - weights)**2).mean()
                else:
                    loss = class_loss.mean()

                ## record
                positive = ((gt_lbls == preds) | (gt_gt_lbls > 2)).sum()  # explicit logical OR
                batch_acc = positive.to(torch.float) / data.shape[0]
                fs_loss_avg.update(loss.item(), data.shape[0])
                fs_acc_avg.update(batch_acc.item(), data.shape[0])

                ## run backward pass
                loss.backward()
                optimizer.step()  ## update

            # print(f"\nfew-shot: {preds}, {gt_gt_lbls}")
            ## each epoch
            print("fs train loss: ", fs_loss_avg.avg)
            print("fs train acc: ", fs_acc_avg.avg)

        if val_data_loader is not None:
            log = evaluate(model.eval(),
                           val_data_loader,
                           device,
                           use_conf=use_conf)
            model.train()

            logger.update(log['loss'], 'val_loss')
            logger.update(log['acc'], 'val_acc')
            print("val loss: ", log['loss'])
            print("val acc: ", log['acc'])

        best_idx = logger.get_best('val_acc', best='max')
        if best_idx == epoch:
            print('save ckpt')
            ## save ckpt
            _save_checkpoint(model_path, epoch, model)

        lr_scheduler.step()
        print()

    ## save final model
    _save_checkpoint(model_path, epoch, model)
Code example #10

X, Y = gen_data(N_DATA + N_Val)
XTrain, YTrain = X[:N_DATA], Y[:N_DATA]
XTest, YTest = X[N_DATA:], Y[N_DATA:]

# torch.autograd.Variable is deprecated; plain tensors carry autograd state now
XTrain = torch.FloatTensor(XTrain)
YTrain = torch.FloatTensor(YTrain)

XTest = torch.FloatTensor(XTest)
YTest = torch.FloatTensor(YTest)

train_set = MyDataset(x=XTrain, y=YTrain)
train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)

model = MyNet(n_in=1, n_out=1).to(device)

model.eval()
with torch.no_grad():  # initial sanity-check prediction; call the module rather than .forward()
    YPRED = model(XTest.to(device))

criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=INITIAL_LEARNING_RATE)
scheduler = StepLR(optimizer, 20)
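# StepLR(optimizer, 20): decay the learning rate every 20 epochs (default gamma=0.1)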

for epoch in range(N_EPOCHS):
    model.train()
    epoch_loss = 0
    for i, (x, y) in enumerate(train_loader):
        x = x.to(device)  # .to() is not in-place; the result must be reassigned
        y = y.to(device)
        #        set_trace()
Code example #11
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=100,
                                          shuffle=False,
                                          num_workers=4)

text_labels = [
    't-shirt', 'trouser', 'pullover', 'dress', 'coat', 'sandal', 'shirt',
    'sneaker', 'bag', 'ankle boot'
]
#vis_datasets(train_loader,text_labels)
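# train_loader (and its dataset) are assumed to be defined earlier in the full script; only the test loader appears in this excerpt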

epoch_size = 20
lr = 0.001
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

net = MyNet(1).to(device)

optimizer = optim.Adam(net.parameters(), lr=lr)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                           milestones=[14, 19],
                                           gamma=0.1,
                                           last_epoch=-1)
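# the LR is cut 10x at epochs 14 and 19, near the end of the 20-epoch run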
criterion = nn.CrossEntropyLoss()
loss_total = 0

for epoch in range(epoch_size):

    for index, (imgs, labels) in enumerate(train_loader):
        imgs, labels = imgs.to(device), labels.to(device)
        optimizer.zero_grad()
        out = net(imgs)