def __init__(self, env, args):
     # define some important parameters
     self.env = env
     self.args = args
     # define the network
     self.net = net(self.env.action_space.n, self.args.use_dueling)
     # copy self.net as the target network
     self.target_net = copy.deepcopy(self.net)
     # make sure the target net has the same weights as the network
     self.target_net.load_state_dict(self.net.state_dict())
     if self.args.cuda:
         self.net.cuda()
         self.target_net.cuda()
     # define the optimizer
     self.optimizer = torch.optim.Adam(self.net.parameters(),
                                       lr=self.args.lr)
     # define the replay memory
     self.buffer = replay_buffer(self.args.buffer_size)
     # define the linear schedule of the exploration
     self.exploration_schedule = linear_schedule(int(self.args.total_timesteps * self.args.exploration_fraction), \
                                                 self.args.final_ratio, self.args.init_ratio)
     # create the folder to save the models
     if not os.path.exists(self.args.save_dir):
         os.mkdir(self.args.save_dir)
     # set the model save path for this environment
     self.model_path = os.path.join(self.args.save_dir, self.args.env_name)
     if not os.path.exists(self.model_path):
         os.mkdir(self.model_path)
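
The linear_schedule helper used above is not shown in this example. A minimal sketch of one possible implementation, inferred from the call linear_schedule(timesteps, final_ratio, init_ratio); the class layout and the get_value method name are assumptions:

# Sketch only: linear decay from init_ratio to final_ratio over
# total_timesteps steps, then held constant (assumed interface).
class linear_schedule:
    def __init__(self, total_timesteps, final_ratio, init_ratio=1.0):
        self.total_timesteps = total_timesteps
        self.final_ratio = final_ratio
        self.init_ratio = init_ratio

    def get_value(self, timestep):
        # fraction of the decay window already elapsed, clipped to [0, 1]
        frac = min(float(timestep) / self.total_timesteps, 1.0)
        return self.init_ratio + frac * (self.final_ratio - self.init_ratio)
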
Example n. 2
def __init__(self, envs, args):
     self.envs = envs
     self.args = args
     # define the network
     self.net = net(self.envs.action_space.n)
     if self.args.cuda:
         self.net.cuda()
     # define the optimizer
     self.optimizer = torch.optim.RMSprop(self.net.parameters(),
                                          lr=self.args.lr,
                                          eps=self.args.eps,
                                          alpha=self.args.alpha)
     if not os.path.exists(self.args.save_dir):
         os.mkdir(self.args.save_dir)
     # set the model save path for this environment
     self.model_path = self.args.save_dir + self.args.env_name + '/'
     if not os.path.exists(self.model_path):
         os.mkdir(self.model_path)
     # set up the rollout batch shape and the initial observations
     self.batch_ob_shape = (self.args.num_workers * self.args.nsteps,
                            ) + self.envs.observation_space.shape
     self.obs = np.zeros(
         (self.args.num_workers, ) + self.envs.observation_space.shape,
         dtype=self.envs.observation_space.dtype.name)
     self.obs[:] = self.envs.reset()
     self.dones = [False for _ in range(self.args.num_workers)]
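
For context, a minimal sketch (not part of the original example) of how observations collected over nsteps from num_workers parallel environments are typically flattened into batch_ob_shape before an update; the helper name is hypothetical:

import numpy as np

def flatten_rollout(step_obs, batch_ob_shape):
    # step_obs: list of length nsteps, each item shaped (num_workers, *obs_shape)
    arr = np.asarray(step_obs)          # (nsteps, num_workers, *obs_shape)
    arr = arr.swapaxes(0, 1)            # (num_workers, nsteps, *obs_shape)
    return arr.reshape(batch_ob_shape)  # (num_workers * nsteps, *obs_shape)
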
HAS_CUDA = torch.cuda.is_available()
# HAS_CUDA = False
IMG_SCALE = 1. / 255
IMG_MEAN = np.array([0.485, 0.456, 0.406]).reshape((1, 1, 3))
IMG_STD = np.array([0.229, 0.224, 0.225]).reshape((1, 1, 3))
MAX_DEPTH = 8.
MIN_DEPTH = 0.
NUM_CLASSES = 40
NUM_TASKS = 2  # segm + depth


def prepare_img(img):
    return (img * IMG_SCALE - IMG_MEAN) / IMG_STD


model = net(num_classes=NUM_CLASSES, num_tasks=NUM_TASKS)
if HAS_CUDA:
    _ = model.cuda()
_ = model.eval()

ckpt = torch.load('../../weights/ExpNYUD_joint.ckpt')
model.load_state_dict(ckpt['state_dict'])
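
A minimal single-frame inference sketch for the model above; it assumes the network accepts an NCHW float tensor, and the exact structure of its outputs is not shown in this snippet:

def run_single_frame(frame_bgr):
    # OpenCV frames are BGR uint8 HWC; convert to RGB float and normalise
    img = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB).astype(np.float32)
    img = prepare_img(img)
    inp = torch.from_numpy(img.transpose(2, 0, 1)).float().unsqueeze(0)
    if HAS_CUDA:
        inp = inp.cuda()
    with torch.no_grad():
        return model(inp)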

mask_img = np.zeros((480, 640))
cap = cv2.VideoCapture(-1)
# print(torch.cuda.is_available())
xx = 0
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    xx += 1
    b_normcv2 = False

    batch_size = 2

    # image visualization
    b_visualize = True

    # graph visualization
    b_plot = True
    save = True

    # weights
    pesi = '../weights_13_cpm_no_vgg_out_8BIT/weights.041-0.00920.hdf5'

    # model
    model = net(input_shape=(1, rows, cols), weights_path=pesi)

    # loading test names
    test_data_names = load_names(val_seq=2, augm=0, dataset=2)
    show = True
    threshold = 0.3
    TP = TN = FP = FN = FP_DIST = 0
    FPS = []
    TOTiou = []
    for image in range(len(test_data_names)):

        sys.stdout.write("\r%.2f%%" %
                         (((image + 1) / float(len(test_data_names))) * 100))
        sys.stdout.flush()
        seq = test_data_names[image]['image'].split('\\')[-3]
        frame = 0
Example n. 5
        # -----------start to train-------------
        print("Processing epoch {}".format(epoch))
        frame_id = 0
        if os.path.isdir("{}/{:04d}".format(output_folder, epoch)):
            continue
        else:
            os.makedirs("{}/{:04d}".format(output_folder, epoch))
        if not os.path.isdir("{}/training".format(output_folder)):
            os.makedirs("{}/training".format(output_folder))

        print(len(input_names), len(processed_names))
        for id in range(num_of_sample):
            if with_IRT:
                if epoch < 6 and ARGS.IRT_initialization:
                    net_in, net_gt = data_in_memory[0]  # option: reuse the first cached pair during IRT initialization
                    prediction = net(net_in)

                    crt_loss = loss_L1(prediction[:, :3, :, :],
                                       net_gt) + 0.9 * loss_L1(
                                           prediction[:, 3:, :, :], net_gt)

                else:
                    net_in, net_gt = prepare_paired_input(
                        task, id, input_names, processed_names)
                    net_in = torch.from_numpy(net_in).permute(
                        0, 3, 1, 2).float().to(device)
                    net_gt = torch.from_numpy(net_gt).permute(
                        0, 3, 1, 2).float().to(device)
                    # net_in,net_gt = data_in_memory[id]
                    prediction = net(net_in)
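
The snippet above stops right after computing the prediction and crt_loss. A minimal sketch of the backward/update step that would typically follow; the optimizer object is an assumption, not shown in this example:

# assumes e.g. optimizer = torch.optim.Adam(net.parameters()) defined elsewhere
optimizer.zero_grad()
crt_loss.backward()
optimizer.step()
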
Example n. 6
def main():
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    #device = torch.device("cuda:0")

    criterion = CrossEntropyLoss().cuda()
    if args.type == 'norm':
        loss_metric = NormSoftmax(args.embedding_size, args.num_classes,
                                  args.scale_rate).cuda()
    elif args.type == 'aamp':
        loss_metric = ArcMarginProduct(args.embedding_size,
                                       args.num_classes,
                                       s=args.scale_rate,
                                       m=args.margin).cuda()
    elif args.type == 'lmcp':
        loss_metric = AddMarginProduct(args.embedding_size,
                                       args.num_classes,
                                       s=args.scale_rate,
                                       m=args.margin).cuda()
    elif args.type == 'sphere':
        loss_metric = SphereProduct(args.embedding_size,
                                    args.num_classes,
                                    m=int(args.margin)).cuda()
    elif args.type == 'lgm':
        loss_metric = CovFixLGM(args.embedding_size, args.num_classes,
                                args.margin).cuda()
    elif args.type == 'lgm2':
        loss_metric = LGMLoss(args.embedding_size, args.num_classes,
                              args.margin).cuda()
    elif args.type == 'none':
        loss_metric = None
    else:
        raise ValueError('unknown loss type: {}'.format(args.type))

    if loss_metric is not None:
        model = net(embedding_size=args.embedding_size,
                    class_size=args.num_classes,
                    only_embeddings=True,
                    pretrained=True)
        to_be_optimized = chain(model.parameters(), loss_metric.parameters())
    else:
        model = net(embedding_size=args.embedding_size,
                    class_size=args.num_classes,
                    only_embeddings=False,
                    pretrained=True)
        to_be_optimized = model.parameters()

    model = torch.nn.DataParallel(model).cuda()
    optimizer = torch.optim.SGD(to_be_optimized,
                                lr=args.learning_rate,
                                momentum=0.9,
                                weight_decay=args.weight_decay)

    #scheduler = lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                               cooldown=2,
                                               verbose=True)

    if args.start_epoch != 0:
        checkpoint = torch.load(args.outdir + '/model_checkpoint.pth.tar')
        args.start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
    if args.evaluate:
        checkpoint = torch.load(args.outdir + '/model_best.pth.tar')
        args.start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        data_loaders = get_dataloader(args.database_dir, args.train_dir,
                                      args.valid_dir, args.test_dir,
                                      args.batch_size, args.num_workers)
        test(model, data_loaders['test'], '00', is_graph=True)

    else:
        for epoch in range(args.start_epoch,
                           args.num_epochs + args.start_epoch):
            print(80 * '=')
            print('Epoch [{}/{}]'.format(
                epoch, args.num_epochs + args.start_epoch - 1))

            data_loaders = get_dataloader(args.database_dir, args.train_dir,
                                          args.valid_dir, args.test_dir,
                                          args.batch_size, args.num_workers)

            train(model, optimizer, epoch, data_loaders['train'], criterion,
                  loss_metric)
            is_best, acc, loss = validate(model, optimizer, epoch,
                                          data_loaders['valid'], criterion,
                                          loss_metric)
            scheduler.step(loss)
            if is_best and acc > 100:
                test(model, data_loaders['test'], epoch, is_graph=True)

        print(80 * '=')

        ## MODEL EVALUATION LOGGING ##
        data_loaders = get_dataloader(args.database_dir, args.train_dir,
                                      args.valid_dir, args.test_dir,
                                      args.batch_size, args.num_workers)
        checkpoint = torch.load(args.outdir + '/model_best.pth.tar')
        model.load_state_dict(checkpoint['state_dict'])
        EER = test(model, data_loaders['test'], epoch, is_graph=False)

        header = [
            'weight_decay', 'learning_rate', 'scale', 'margin', 'type',
            'batch_size', 'embedding_size', 'EER', 'out_dir'
        ]
        info = [
            args.weight_decay, args.learning_rate, args.scale_rate,
            args.margin, args.type, args.batch_size, args.embedding_size, EER,
            args.outdir
        ]

        if not os.path.exists(args.logdir):
            with open(args.logdir, 'w') as file:
                logger = csv.writer(file)
                logger.writerow(header)
                logger.writerow(info)
        else:
            with open(args.logdir, 'a') as file:
                logger = csv.writer(file)
                logger.writerow(info)
Example n. 7

# convert the observation into a (1, C, H, W) float tensor
def get_tensors(obs):
    input_tensor = torch.tensor(np.transpose(obs, (2, 0, 1)),
                                dtype=torch.float32).unsqueeze(0)
    return input_tensor


if __name__ == "__main__":
    args = get_args()
    # create environment
    #env = VecFrameStack(wrap_deepmind(make_atari(args.env_name)), 4)
    env = make_atari(args.env_name)
    env = wrap_deepmind(env, frame_stack=True)
    # get the model path
    model_path = args.save_dir + args.env_name + '/model.pt'
    network = net(env.action_space.n)
    network.load_state_dict(
        torch.load(model_path, map_location=lambda storage, loc: storage))
    obs = env.reset()
    while True:
        env.render()
        # get the obs
        with torch.no_grad():
            input_tensor = get_tensors(obs)
            _, pi = network(input_tensor)
        actions = select_actions(pi, True)
        obs, reward, done, _ = env.step([actions])
        if done:
            obs = env.reset()
    env.close()
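
The select_actions helper called above is not shown. A minimal sketch of one possible version, assuming pi holds a row of action probabilities and True requests greedy (deterministic) selection:

import torch

def select_actions(pi, deterministic=False):
    # pi: tensor of shape (1, num_actions) with action probabilities (assumed)
    if deterministic:
        return torch.argmax(pi, dim=1).item()
    return torch.distributions.Categorical(probs=pi).sample().item()
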
Example n. 8
    b_scale = True
    b_normcv2 = False
    b_tanh = False
    limit_train = 20
    #limit_train = -1
    limit_test = 10

    #limit_test = -1
    b_debug = False
    fulldepth = False
    removeBackground = False
    equalize = False

    WEIGHT = 'weights'

    model = net(input_shape=(1, rows, cols))

    model.summary()

    # load training image names
    train_data_names = load_names(dataset=0)
    train_data_names = train_data_names + load_names(dataset=1)
    # train_data_names = load_names(dataset=2)
    random.shuffle(train_data_names)
    # load validation image names
    val_data_names = load_names_val()
    # data augmentation
    if data_augmentation:
        for i in range(1, 9):
            tmp = load_names(augm=i)
            train_data_names = train_data_names + tmp
Example n. 9
parser.add_argument('--model_name',
                    default='model',
                    type=str,
                    help='name of the saved model')
parser.add_argument('--num_epoch',
                    default=100,
                    type=int,
                    help='number of epochs')
parser.add_argument('--resume',
                    '-r',
                    action='store_true',
                    help='resume from checkpoint')
args = parser.parse_args()

best_acc = 0

net = net().to('cuda')
net = torch.nn.DataParallel(net)
cudnn.benchmark = True

if args.resume:
    print('==> Resuming from checkpoint..')
    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
    checkpoint = torch.load('./checkpoint/{}.t7'.format(args.model_name))
    net.load_state_dict(checkpoint['net'])
    best_F2 = checkpoint['F2']

train_loader = Data.DataLoader(loader('datafile/train.txt'),
                               batch_size=args.batch_size,
                               shuffle=True,
                               num_workers=8,
                               drop_last=True)
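
The training loop itself is not part of the snippet above. A minimal sketch of one epoch over train_loader, assuming the loader yields (inputs, targets) batches; the criterion and optimizer choices here are assumptions:

criterion = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)

def train_one_epoch():
    net.train()
    for inputs, targets in train_loader:
        inputs, targets = inputs.to('cuda'), targets.to('cuda')
        optimizer.zero_grad()
        loss = criterion(net(inputs), targets)
        loss.backward()
        optimizer.step()
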
Example n. 10
import numpy as np
import torch
from rl_utils.env_wrapper.atari_wrapper import make_atari, wrap_deepmind

def get_tensors(obs):
    obs = np.transpose(obs, (2, 0, 1))
    obs = np.expand_dims(obs, 0)
    obs = torch.tensor(obs, dtype=torch.float32)
    return obs

if __name__ == '__main__':
    args = get_args()
    # create the environment
    env = make_atari(args.env_name)
    env = wrap_deepmind(env, frame_stack=True)
    # create the network
    net = net(env.action_space.n, args.use_dueling) 
    # model path
    model_path = args.save_dir + args.env_name + '/model.pt'
    # load the models
    net.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
    # start to test the demo
    obs = env.reset()
    for _ in range(2000):
        env.render()
        with torch.no_grad():
            obs_tensor = get_tensors(obs)
            action_value = net(obs_tensor)
        action = torch.argmax(action_value.squeeze()).item()
        obs, reward, done, _ = env.step(action)
        if done:
            obs = env.reset()
Example n. 11
    image.requires_grad = True
    return image


def load_crop_image(xmin, ymin, xmax, ymax):
    image_path = "data/1.png"
    image = crop_image(image_path, xmin, ymin, xmax, ymax)
    image = cv2.resize(image, (224, 224))
    totensor = torchvision.transforms.ToTensor()
    image = totensor(image)
    image = torch.unsqueeze(image, 0)
    image.requires_grad = True
    return image
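
The crop_image helper used by load_crop_image is not shown. A minimal sketch, assuming it reads the file with OpenCV and slices out the box in pixel coordinates:

import cv2

def crop_image(image_path, xmin, ymin, xmax, ymax):
    image = cv2.imread(image_path)      # HWC, BGR, uint8
    return image[ymin:ymax, xmin:xmax]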


detection = net()
classi = mnasnet.MnasNet()

# define the optimizers for the two models
optimizer = torch.optim.SGD(detection.parameters(), lr=0.01)
optimizer2 = torch.optim.SGD(classi.parameters(), lr=0.01)


# define the training loop
def train():
    for i in range(1000):
        if i % 10 == 0:
            torch.save(detection.state_dict(), 'data/params.pkl')
        image = load_image()
        _, loss, boxes = detection(image)
        print(boxes)
Example n. 12
import argparse
import os
import shutil

import torch
from torch.utils.tensorboard import SummaryWriter

import datasets
import models

if __name__ == '__main__':
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=2)
    parser.add_argument('--lr', type=float, default=0.0001)
    parser.add_argument('--epoch', type=int, default=250)
    args = parser.parse_args()

    train_loader, test_loader = datasets.prepare(batch_size=args.batch_size)
    model = models.net(num_classes=datasets.num_classes).to(device)
    criterion = models.loss(num_classes=datasets.num_classes)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    log_dir = 'data/runs'
    if os.path.exists(log_dir):
        shutil.rmtree(log_dir)
    os.makedirs(log_dir)
    writer = SummaryWriter(log_dir=log_dir)
    epoch_digit = len(str(args.epoch))
    for epoch in range(args.epoch):
        model.train()
        train_loss = 0
        train_acc = 0