Example #1
def main():
    # set up command-line arguments
    parser = argparse.ArgumentParser(description='MobileNet_v2_DeepLab_v3 Pytorch Implementation')
    # TODO: maybe make it work with multiple datasets?
    #parser.add_argument('--dataset', default='cityscapes', choices=['cityscapes', 'other'],
    #                    help='Dataset used in training MobileNet v2+DeepLab v3')
    parser.add_argument('--root', default='./data/cityscapes', help='Path to your dataset')
    parser.add_argument('--epoch', default=None, help='Total number of training epochs')
    parser.add_argument('--lr', default=None, help='Base learning rate')
    parser.add_argument('--pretrain', default=None, help='Path to a pre-trained backbone model')
    parser.add_argument('--resume_from', default=None, help='Path to a checkpoint to resume model')
    parser.add_argument('--logdir', default=None, help='Directory to save logs for Tensorboard')
    parser.add_argument('--batch_size', default=128, help='Batch size for training')

    args = parser.parse_args()
    params = Params()

    # parse args
    if not os.path.exists(args.root):
        if params.dataset_root is None:
            raise ValueError('ERROR: Root %s doesn\'t exist!' % args.root)
    else:
        params.dataset_root = args.root
    if args.epoch is not None:
        params.num_epoch = int(args.epoch)
    if args.lr is not None:
        params.base_lr = float(args.lr)
    if args.pretrain is not None:
        params.pre_trained_from = args.pretrain
    if args.resume_from is not None:
        params.resume_from = args.resume_from
    if args.logdir is not None:
        params.logdir = args.logdir
    params.summary_dir, params.ckpt_dir = create_train_dir(params.logdir)
    params.train_batch = int(args.batch_size)

    LOG('Network parameters:')
    print_config(params)

    # create dataset and transformation
    LOG('Creating Dataset and Transformation......')
    datasets = create_dataset(params)
    LOG('Creation Succeeded.\n')

    # create model
    LOG('Initializing MobileNet and DeepLab......')
    net = MobileNetv2_DeepLabv3(params, datasets)
    LOG('Model Built.\n')

    # let's start to train!
    net.Train()
    net.Test()
Example #2
    def __init__(self):
        self.p = Printer(1)
        self.param = Params()
        self.m = Math()
        self.am = AudioManager()
        self.paths = Paths()
        self.trainingDesc, self.testingDesc = self.scanForAudioFiles()
Example #3
def gqn_draw_identity_model_fn(features, labels, mode, params=None):
    _CONFIG['CONTEXT_SIZE'] = 1
    params = Params(**_CONFIG)
    _SEQ_LENGTH = params.SEQ_LENGTH

    query_pose = features.query_camera
    target_frame = labels
    poses = features.query_camera
    frames = labels

    mu_target, ep_gqn = gqn_draw(query_pose, target_frame, poses, frames,
                                 params, mode != tf.estimator.ModeKeys.PREDICT)
    sigma_target = _linear_noise_annealing(params)
    target_normal = tf.distributions.Normal(loc=mu_target, scale=sigma_target)
    target_sample = tf.identity(target_normal.sample(), name='target_sample')
    l2_reconstruction = tf.identity(
        tf.metrics.mean_squared_error(labels=target_frame,
                                      predictions=mu_target))
    if params.DEBUG:
        tf.summary.image('context_frame_1', frames, max_outputs=1)
        tf.summary.image('target_images', labels, max_outputs=1)
        tf.summary.image('target_means', mu_target, max_outputs=1)
        tf.summary.scalar('l2_reconstruction', l2_reconstruction[1])
        gs = debug_canvas_image_mean(
            [ep_gqn['canvas_{}'.format(i)] for i in range(_SEQ_LENGTH)])
        tf.summary.image('generator_sequence_mean', gs, max_outputs=1)

    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {'target_sample': target_sample}
        estimator_spec = tf.estimator.EstimatorSpec(mode=mode,
                                                    predictions=predictions)
    else:
        mu_q, sigma_q, mu_pi, sigma_pi = [], [], [], []
        for i in range(_SEQ_LENGTH):
            mu_q.append(ep_gqn['mu_q_{}'.format(i)])
            sigma_q.append(ep_gqn['sigma_q_{}'.format(i)])
            mu_pi.append(ep_gqn['mu_pi_{}'.format(i)])
            sigma_pi.append(ep_gqn['sigma_pi_{}'.format(i)])
        elbo, ep_elbo = gqn_draw_elbo(mu_target, sigma_target, mu_q, sigma_q,
                                      mu_pi, sigma_pi, target_frame)
        if params.DEBUG:
            tf.summary.scalar('target_llh', ep_elbo['target_llh'])
            tf.summary.scalar('kl_regularizer', ep_elbo['kl_regularizer'])

        if mode == tf.estimator.ModeKeys.TRAIN:
            lr = _linear_lr_annealing(params)
            optimizer = tf.train.AdamOptimizer(learning_rate=lr)
            train_op = optimizer.minimize(elbo, tf.train.get_global_step())
            estimator_spec = tf.estimator.EstimatorSpec(mode=mode,
                                                        loss=elbo,
                                                        train_op=train_op)
        else:
            eval_metric_ops = {
                'l2_reconstruction':
                tf.metrics.mean_squared_error(target_frame, mu_target)
            }
            estimator_spec = tf.estimator.EstimatorSpec(
                mode=mode, loss=elbo, eval_metric_ops=eval_metric_ops)
    return estimator_spec
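A minimal wiring sketch (an assumption, not part of the original example): a model_fn with the (features, labels, mode, params) signature above is typically handed to tf.estimator.Estimator; the model_dir value and the input_fn name below are placeholders.

# hedged sketch: consume the model_fn via the TF1 Estimator API
estimator = tf.estimator.Estimator(
    model_fn=gqn_draw_identity_model_fn,
    model_dir='/tmp/gqn_draw_identity')  # assumed output directory
# estimator.train(input_fn=my_gqn_input_fn)  # my_gqn_input_fn is hypothetical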
Example #4
    def __init__(self):
        self.paths = Paths()
        self.param = Params()
        self.pc = PrintConfig()
        self.p = Printer(1)
        self.am = AudioManager()
        self.m = Math()
        self.pickle = Pickle(self.paths.pickle, lTag=self.paths.tag1, sTag=self.paths.tag2)

        self.data = self.pickle.LoadData()
Example #5
def main():
    # set up command-line arguments
    parser = argparse.ArgumentParser(
        description='MobileNet_v2_DeepLab_v3 Pytorch Implementation')
    parser.add_argument(
        '--dataset',
        default='cityscapes',
        choices=['cityscapes', 'other'],
        help='Dataset used in training MobileNet v2+DeepLab v3')
    parser.add_argument('--root',
                        default='./data/cityscapes',
                        help='Path to your dataset')
    parser.add_argument('--epoch',
                        default=None,
                        help='Total number of training epochs')
    parser.add_argument('--lr', default=None, help='Base learning rate')
    parser.add_argument('--pretrain',
                        default=None,
                        help='Path to a pre-trained backbone model')
    parser.add_argument('--resume_from',
                        default=None,
                        help='Path to a checkpoint to resume model')

    args = parser.parse_args()
    params = Params()

    # parse args
    if not os.path.exists(args.root):
        if params.dataset_root is None:
            raise ValueError('ERROR: Root %s doesn\'t exist!' % args.root)
    else:
        params.dataset_root = args.root
    if args.epoch is not None:
        params.num_epoch = int(args.epoch)
    if args.lr is not None:
        params.base_lr = float(args.lr)
    if args.pretrain is not None:
        params.pre_trained_from = args.pretrain
    if args.resume_from is not None:
        params.resume_from = args.resume_from

    LOG('Network parameters:')
    print_config(params)

    # create dataset and transformation
    LOG('Creating Dataset and Transformation......')
    datasets = create_dataset(params)
    LOG('Creation Succeeded.\n')

    # create model
    LOG('Initializing MobileNet and DeepLab......')
    net = MobileNetv2_DeepLabv3(params, datasets)
    LOG('Model Built.\n')

    # let's start to train!
    net.Train()
    net.Test()
Example #6
    def __init__(self):
        self.paths = Paths()
        self.param = Params()
        self.pc = PrintConfig()
        self.data = SpeachData()
        self.p = Printer(1)
        self.am = AudioManager()
        self.m = Math()
        self.pickle = Pickle(self.paths.pickle, sTag=self.paths.tag1)

        self.data.raw = self.am.readAudio(self.paths.file)
Example #7
def create_expansionai_env(env_id, video=False, params=Params()):
    gym.spec(env_id)._kwargs = {
        'armies': params.armies,
        'board_size': params.board_size,
        'offset_x': 0,
        'offset_y': 0
    }
    env = gym.make(env_id)
    if video:
        env = wrappers.Monitor(env, 'test', force=True, mode='training')
    return env
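A minimal usage sketch (an assumption, not shown in the original example): because the second positional parameter of create_expansionai_env is video, the Params instance should be passed as a keyword; params.env_name and the config import follow the other examples on this page.

from config import Params  # as in the other examples

params = Params()
env = create_expansionai_env(params.env_name, video=False, params=params)
observation = env.reset()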
Example #8
def main():
    parser = argparse.ArgumentParser(
        description='MobileNet_V2 Pytorch Implementation')
    parser.add_argument('--dataset',
                        default='cifar10',
                        choices=['imagenet', 'cifar10', 'cifar100', 'other'],
                        help='Dataset used in training MobileNet V2')
    parser.add_argument('--root',
                        default='./data/cifar10',
                        help='Path to your dataset')

    args = parser.parse_args()

    # parse args
    if args.dataset == 'cifar10':
        params = CIFAR10_params()
    elif args.dataset == 'cifar100':
        params = CIFAR100_params()
    else:
        params = Params()
    params.dataset_root = args.root

    if not os.path.exists(args.root):
        print('ERROR: Root %s doesn\'t exist!' % args.root)
        exit(1)
    """ TEST CODE """
    # params = CIFAR100_params
    # params.dataset_root = '/home/ubuntu/cifar100'

    # create model
    print('\nInitializing MobileNet......')
    net = MobileNetv2(params)
    print('Initialization Done.\n')

    # create dataset and transformation
    print('Loading Data......')
    dataset = create_dataset(params)
    print('Data Loaded.\n')

    # let's start to train!
    net.train_n_epoch(dataset)
Example #9
    def __init__(self):
        self.paths = Paths()
        self.param = Params()
        self.pc = PrintConfig()
        self.am = AudioManager()

        self.p = Printer(1)
        self.S = Synthesizer()
        self.pickle = Pickle(self.paths.pickle)
        self.decoded, self.original, self.coded = self.loadAll()
        self.cP, self.cG, self.cLpc = self.organize()
        self.cSn = self.SynthAll()
Example #10
    def __init__(self):
        self.paths = Paths()
        self.param = Params()
        self.pc = PrintConfig()
        self.p = Printer(1)
        self.am = AudioManager()
        self.m = Math()
        self.pickle = Pickle(self.paths.pickle,
                             lTag=self.paths.tag4,
                             sTag=self.paths.tag5)
        self.cc = CodeConfig()
        self.cu = CodingUtils()
        self.encoded = self.pickle.LoadEncoded()
Example #11
def run():
    params = Params()

    grid_data = get_data(params.data_params)
    data_dict = dataset_split(grid_data,
                              params.data_params['test_ratio'],
                              params.data_params['val_ratio'])

    batch_gens = create_generator(data_dict, params.model_params['TRAJGRU']['batch_params'])

    # for batch_idx, (x, y) in enumerate(batch_gens['train'].batch_next()):
    #     print(batch_idx, x.shape)

    trained_model = trainer(batch_gens, **params.model_params['TRAJGRU'])

    print('Training finished, saving the model')
    with open('results/conv_lstm.pkl', 'wb') as model_file:
        pickle.dump(trained_model, model_file)
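A minimal load-back sketch (an assumption, not part of the original example), reading the file written above:

import pickle

with open('results/conv_lstm.pkl', 'rb') as model_file:
    trained_model = pickle.load(model_file)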
Example #12
def predict():

    params = Params()

    model_path = 'checkpoints/Checkpoint_epoch_150.pth.tar'
    model = MobileNetv2_DeepLabv3Model(params).to(device)
    checkpoint = torch.load(model_path)
    state_dict = checkpoint['state_dict']
    new_dict = {}
    for k in state_dict.keys():
        # print('K: {}, '.format(k))
        new_dict['model.' + k] = state_dict[k]
    model.load_state_dict(new_dict)
    model.eval()
    print('Model loaded.')

    img_fs = [
        'images/berlin_000004_000019_leftImg8bit.png',
        'images/berlin_000002_000019_leftImg8bit.png',
    ]
    transform = transforms.Compose(
        [transforms.Resize(image_size),
         transforms.ToTensor()])
    for img_f in img_fs:
        img = cv2.imread(img_f)
        inp = Variable(transform(Image.fromarray(img)).to(device).unsqueeze(0))
        print(inp.size())
        out = model(inp)
        print(out.size())

        _, predictions = torch.max(out.data, 1)
        prediction = predictions.cpu().numpy()[0]
        print(prediction)
        mask_color = np.asarray(label_to_color_image(prediction, 'cityscapes'),
                                dtype=np.uint8)
        frame = cv2.resize(img, (1024, 512))
        print('msk: {}, frame: {}'.format(mask_color.shape, frame.shape))
        res = cv2.addWeighted(frame, 0.5, mask_color, 0.7, 1)

        cv2.imshow('res', res)
        # block until ESC (key code 27) is pressed before moving to the next image
        while cv2.waitKey(0) != 27:
            pass
Example #13
    def __init__(self, num_inputs, action_space, params=Params()):
        """ Initis Actor Critic """
        super(ActorCritic, self).__init__()
        self.params = params

        logger.info("Init ActorCritic num_inputs: %s and action_space: %s" %
                    (num_inputs, action_space))
        hidden_layer_neuron_size = self.params.board_size * self.params.board_size
        self.fc1 = nn.Linear(self.params.board_size, hidden_layer_neuron_size)
        self.fc2 = nn.Linear(hidden_layer_neuron_size,
                             hidden_layer_neuron_size)
        self.fc3 = nn.Linear(hidden_layer_neuron_size, num_inputs)

        self.lstm = nn.LSTMCell(
            self.params.board_dimension_size * self.params.board_size,
            self.params.lstm_size)

        # num_outputs = int(action_space.high[0])  # action_space.num_discrete_space
        num_outputs = int(action_space.n)  # action_space.num_discrete_space

        self.critic_linear = nn.Linear(256, 1)  # output = V(S)
        self.actor_linear = nn.Linear(256, num_outputs)  # output = Q(S, a)

        self.apply(init_weights)

        self.actor_linear.weight.data = normalized_columns_initializer(
            self.actor_linear.weight.data, 0.01)
        self.actor_linear.bias.data.fill_(0)

        self.critic_linear.weight.data = normalized_columns_initializer(
            self.critic_linear.weight.data, 0.01)
        self.critic_linear.bias.data.fill_(0)

        self.lstm.bias_ih.data.fill_(0)
        self.lstm.bias_hh.data.fill_(0)

        self.train()
Example #14
    # basic setup: fix random seeds for reproducibility
    seed = 789
    torch.manual_seed(seed)  # seed: long
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)

    # device information
    print('GPU available: ', torch.cuda.is_available())
    print('CUDNN available: ', torch.backends.cudnn.enabled)
    print('GPU number: ', torch.cuda.device_count())

    # parser = OptionParser()
    # parser.add_option("--train", dest="trainFile",
    #                   default="", help="train dataset")
    # parser.add_option("--dev", dest="devFile",
    #                   default="", help="dev dataset")
    # parser.add_option("--test", dest="testFile",
    #                   default="", help="test dataset")

    # (options, args) = parser.parse_args()

    params = Params()
    if not torch.cuda.is_available() or not torch.backends.cudnn.enabled or torch.cuda.device_count() <= 0:
        params.use_gpu = False
    params.device = torch.device('cuda:' + str(params.device_id) if params.use_gpu else 'cpu')
    params.show()

    master = Nebuchadnezzar(params)
    master.data_process()
    master.train_process()
      # Create the Timeline object, and write it to a json file
      fetched_timeline = timeline.Timeline(run_metadata.step_stats)
      chrome_trace = fetched_timeline.generate_chrome_trace_format()
      with open(os.path.join(params.log_dir, 'timeline_01.json'), 'w') as f:
        f.write(chrome_trace)
      with open(os.path.join(params.log_dir, 'mem_info.json'), 'w') as f:
        f.write(str(run_metadata))

    else:
      loss_value, _ = sess.run([loss, train_op], feed_dict=feed_dict)

    if step % params.ckpt_save_steps == 0:
      model.save(saver, sess, params.log_dir, step)

    duration = time.time() - start_time
    print('step {:d} \t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))

  coord.request_stop()
  coord.join(threads)

if __name__ == '__main__':
  args = get_arguments()
  params = Params()
  params = load_json_to_params(params, args.json_path)
  params.dataset_directory = '/home/ddegeus/datasets/Cityscapes/training/'
  params.filelist_filepath = '/home/ddegeus/datasets/Cityscapes/training/panoptic/filenames.lst'

  params.is_training = True
  params.batch_norm_istraining = True
  print(params)
  train(params)
        out_file = np.stack([label_ids, instance_ids], axis=2)

        print(np.min(label_ids), np.max(label_ids))

        save_dir = params.save_dir
        im_name_base = os.path.splitext(os.path.basename(str(filename)))[0]
        out_fname = os.path.join(save_dir, im_name_base + '.png')
        print(out_fname)

        print(np.min(out_file[..., 0]), np.max(out_file[..., 0]))

        Image.fromarray(out_file.astype(np.uint8)).save(out_fname)

if __name__ == '__main__':
  args = get_arguments()
  params = Params()
  params = load_json_to_params(params, args.json_path)
  params.num_steps_predict = params.num_steps_eval
  params.save_predictions = args.save_predictions
  params.save_dir = args.save_dir

  params.is_training = False
  params.batch_norm_istraining = False
  params.height_input = 512
  params.width_input = 1024
  params.height_orig = 604
  params.width_orig = 960
  params.Nb = 1

  filenames_list = list()
  for file in os.listdir(args.image_dir):
        time_str = datetime.datetime.now().isoformat()
        if verbose:
            if random.randint(0, 10) < 1:
                print(("%s: DIS step %d, loss %f with acc %f " %
                       (time_str, step, current_loss, accuracy)))

    def getReward(self, batch, sess, verbose=True):
        feed_dict = {
            self.input_x_1: batch[:, 0],
            self.input_x_2: batch[:, 1],
            self.input_x_3: batch[:, 2],
            self.dropout_keep_prob_holder: 1.0
        }
        scores = sess.run(self.reward, feed_dict)
        return scores


if __name__ == "__main__":

    from config import Params
    opts = Params()
    opts.parseArgs()

    from dataHelper import InsuranceQA
    dataset = InsuranceQA(opts)

    model = Discriminator(opts)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    batch = next(iter(dataset.generate_uniform_pair()))
Example #18
import logging
import os

import torch

import my_optim  # project-local optimizer module providing SharedAdam
from config import Params  # as in the other examples on this page
from envs import create_expansionai_env
from model import ActorCritic
from test import test
from train import train

LOGGING_FORMAT = '%(asctime)s - %(name)s - %(thread)d|%(process)d - %(levelname)s - %(message)s'
logging.basicConfig(format=LOGGING_FORMAT)

# logging.getLogger('Model').setLevel(logging.INFO)
# logging.getLogger('ExpansionAiEnv').setLevel(logging.DEBUG)
logging.getLogger('Train').setLevel(logging.INFO)
logging.getLogger('Test').setLevel(logging.INFO)

# Main run
os.environ['OMP_NUM_THREADS'] = '1'  # 1 thread per core
# create the params object from the Params class; it sets all the model parameters
params = Params()
params.max_episode_length = 1_000_000
params.num_processes = 3

torch.manual_seed(params.seed)  # setting the seed (not essential)
env = create_expansionai_env(
    params.env_name,
    params=params)  # pass params as a keyword: the second positional argument of create_expansionai_env is `video`

# shared_model is the model shared by the different agents (different threads in different cores)
shared_model = ActorCritic(env.observation_space.shape[0], env.action_space)
# storing the model in the shared memory of the computer, which allows the threads to have access to this shared memory even if they are in different cores
shared_model.share_memory()

# the optimizer is also shared because it acts on the shared model
optimizer = my_optim.SharedAdam(shared_model.parameters(), lr=params.lr)
Example #19
        transforms.Compose([
            RandomResizedCrop(params.image_size, scale=(0.5, 2.0)),
            RandomHorizontalFlip(),
            ToTensor()
        ]),
        'val':
        transforms.Compose([
            RandomResizedCrop(params.image_size, scale=(0.5, 2.0)),
            ToTensor()
        ]),
        'test':
        transforms.Compose([ToTensor()])
    }

    # file_dir = {p: os.path.join(params.dataset_root, p) for p in phase}

    # datasets = {Cityscapes(file_dir[p], mode=p, transforms=transform[p]) for p in phase}
    datasets = {
        p: Cityscapes(params.dataset_root, mode=p, transforms=transform[p])
        for p in phase
    }

    return datasets


if __name__ == '__main__':
    from config import Params
    pp = Params()
    pp.dataset_root = '/media/ubuntu/disk/cityscapes'
    datasets = create_datasets(pp)
Example #20
from optparse import OptionParser
from config import Params
from Pythagoras_master import Pythagoras

if __name__ == "__main__":

    parser = OptionParser()

    parser.add_option("--train",
                      dest="trainFile",
                      default="",
                      help="train dataset")

    parser.add_option("--dev", dest="devFile", default="", help="dev dataset")

    parser.add_option("--test",
                      dest="testFile",
                      default="",
                      help="test dataset")

    (options, args) = parser.parse_args()

    params = Params()
    if options.trainFile != "":
        params.process_params.trainFile = options.trainFile
    if options.devFile != "":
        params.process_params.devFile = options.devFile
    if options.testFile != "":
        params.process_params.testFile = options.testFile
    master = Pythagoras(params)
    master.train()
      edges_invert = edges_invert.astype(np.uint8)

      class_colors = class_colors.astype(np.uint8) * np.expand_dims(edges_invert, axis=2) + np.expand_dims(
        edges_total, axis=2)

      img_obj = Image.fromarray(np.uint8(class_colors))

      ax.imshow(img_obj)
      plt.waitforbuttonpress(timeout=5)




if __name__ == '__main__':
  args = get_arguments()
  params = Params()
  params = load_json_to_params(params, args.json_path)
  params.dataset_directory = '/home/ddegeus/datasets/Cityscapes/validation/'
  params.filelist_filepath = '/home/ddegeus/datasets/Cityscapes/validation/panoptic/filenames.lst'
  params.is_training = False
  params.batch_norm_istraining = False
  params.num_steps_predict = params.num_steps_eval
  params.height_input = 512
  params.width_input = 1024
  params.Nb = 1

  predict(params)