Code Example #1
    def __init__(self,
                 options=None,
                 title='EmdrosApplication',
                 DO_REF=True,
                 DO_OUT=True,
                 DO_LBL=True):
        # remember whether the caller actually supplied options before filling in a default
        options_supplied = options is not None
        if options is None:
            options = Options()
        kernel_cfg_name = options.get('kernel')
        if kernel_cfg_name is not None:
            kernel_cfg_name = 'emdros_application.syscfg.' + re.sub(
                r'\.py[c]?$', '', kernel_cfg_name)
        else:
            kernel_cfg_name = syscfg.config.DEFAULT_KERNEL
        import importlib
        kernel = importlib.import_module(kernel_cfg_name)
        self.kernel = kernel
        #kernel = __import__(kernel_cfg_name)

        self.title = title
        self.DO_REF = DO_REF
        self.DO_OUT = DO_OUT
        self.DO_LBL = DO_LBL

        self.spinner = Spinner()

        if not options_supplied:
            # no options were passed in: load them from the kernel's config directory
            self.options = Options(
                addPathAndExt('options', kernel.CFG_DIR, kernel.CFG_EXT))
        else:
            self.options = options

        self.cfg = self.configure(self.options, kernel)
        self.modeCfgs = self.setupModeConfigurations(kernel)

        self.mql = MQLEngine(database=self.database,
                             usr=self.usr,
                             pwd=self.pwd,
                             be=self.backend,
                             domainfile=self.domainqueryfile,
                             domain=self.domain,
                             VERBOSE=self.VERBOSE,
                             verbose=self.verbose,
                             test=self.test,
                             outstream=self.outstream,
                             errstream=self.errstream,
                             kernel=kernel)

        if self.DO_OUT or self.DO_REF:
            self.ref, self.out = self.setupOutput(kernel=kernel)

        if self.DO_LBL:
            self.lbl = self.setupLabelManagers(options.args, kernel=kernel)

        if self.options.get('gui'):
            self.gui = GUI(title=title, app=self)
            self.gui.mainloop()
        else:
            self.gui = None
Code Example #2
def plt_heat_map_results(plt_file_name = None):
    opt = Options()
    opt.disp_on = False
    pob_siz_len = np.arange(start=3, stop=11, step=2)  # only odd view sizes are needed
    hist_len = np.arange(start=1, stop=5)
    # pob_siz_len =[5,3,5,9,1]
    results_success_rate_map_zero = []
    results_astar_diff = []
    cnt_Test=0
    with K.get_session():
        for p in pob_siz_len:
            opt.pob_siz = p
            opt.state_siz = (p * opt.cub_siz) ** 2
            print("start with opt.pob_siz: {}".format(opt.pob_siz))
            print("start with opt.state_siz: {}".format(opt.state_siz))
            # get_data(opt)  # generate new data
            for l in hist_len:
                opt.hist_len = l
                print("start with opt.hist_len: {}".format(opt.hist_len))
                # train_model(opt, mdl_name,epochs=EPOCHS)
                # [success_rate, astar_diff] = test_model(opt,mdl_name)
                # results_success_rate_map_zero.append(success_rate)

                results_success_rate_map_zero.append(cnt_Test)
                cnt_Test += 1
                # results_astar_diff.append(astar_diff)
    results_success_rate_map_zero=np.array(results_success_rate_map_zero)
    plt.imshow(results_success_rate_map_zero.reshape(len(pob_siz_len),len(hist_len)), cmap='hot', interpolation='nearest')
    plt.colorbar()
    # plt.show()
    helper_save(plt_file_name)
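
# A small illustrative helper (hypothetical, not part of the project) that makes the
# row/column mapping of the heat map above explicit: the outer loop over pob_siz_len
# fills rows, the inner loop over hist_len fills columns.
import numpy as np
import matplotlib.pyplot as plt

def plot_result_grid(results, row_vals, col_vals, row_label, col_label, fname=None):
    grid = np.array(results).reshape(len(row_vals), len(col_vals))
    plt.imshow(grid, cmap='hot', interpolation='nearest')
    plt.yticks(range(len(row_vals)), row_vals)
    plt.xticks(range(len(col_vals)), col_vals)
    plt.ylabel(row_label)
    plt.xlabel(col_label)
    plt.colorbar()
    if fname is not None:
        plt.savefig(fname)
    else:
        plt.show()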
Code Example #3
def evaluate():
    if mx.context.num_gpus() > 0:
        ctx = mx.gpu()
    else:
        ctx = mx.cpu(0)

    # loading configs
    args = Options().parse()
    cfg = Configs(args.config_path)
    # set logging level
    logging.basicConfig(level=logging.INFO)

    # images
    content_image = tensor_load_rgbimage(cfg.content_image,
                                         ctx,
                                         size=cfg.val_img_size,
                                         keep_asp=True)
    style_image = tensor_load_rgbimage(cfg.style_image,
                                       ctx,
                                       size=cfg.val_style_size)
    style_image = preprocess_batch(style_image)
    # model
    style_model = Net(ngf=cfg.ngf)
    style_model.collect_params().load(cfg.val_model, ctx=ctx)
    # forward
    output = style_model(content_image, style_image)
    # save img
    tensor_save_bgrimage(output[0], cfg.output_img)
    logging.info("Save img to {}".format(cfg.output_img))
Code Example #4
def plt_pob_siz_results(plt_file_name = None):
    mdl_name = "list_len_mdl.h5"
    opt = Options()
    pob_siz_len = np.arange(start=3, stop=11, step=2)  # only odd view sizes are needed
    # pob_siz_len =[5,3,5,9,1]
    results_success_rate_map_zero = []
    results_astar_diff = []


    for p in pob_siz_len:
        opt.pob_siz = p
        opt.state_siz = (p * opt.cub_siz) ** 2
        print("start with opt.pob_siz: {}".format(opt.pob_siz))
        print("start with opt.state_siz: {}".format(opt.state_siz))
        map_ind = opt.map_ind
        opt.map_ind = 0
        get_data(opt)  # generate new data with map 0
        opt.map_ind = map_ind
        train_model(opt, mdl_name, epochs=EPOCHS)
        [success_rate, astar_diff] = test_model(opt,mdl_name)
        results_success_rate_map_zero.append(success_rate)
        results_astar_diff.append(astar_diff)

    # two subplots: success rate and difference to A*
    f, axarr = plt.subplots(1,2)
    axarr[0].scatter(pob_siz_len, results_success_rate_map_zero)
    axarr[0].set_ylabel(r'success rate',usetex=True)
    axarr[1].scatter(pob_siz_len, results_astar_diff)
    # axarr[1].set_title('difference to astar in number of steps')
    axarr[1].set_ylabel(r'mean difference to astar in steps',usetex=True)

    axarr[0].set_xlabel(r'view size',usetex=True)
    axarr[1].set_xlabel(r'view size',usetex=True)
    helper_save(plt_file_name)
Code Example #5
File: viewdm3.py  Project: ovidiopr/DM3Viewer
 def loadOptions(self):
     '''create the self.options object from values stored in the settings'''
     self.options = Options()
     for opt, dflt in zip(self.options.optlist, self.options.dfltlist):
         if isinstance(dflt, (str, unicode)):
             setattr(
                 self.options, opt,
                 unicode(
                     self.settings.value(
                         'Options/' + opt,
                         QtCore.QVariant(QtCore.QString(dflt))).toString()))
         elif isinstance(dflt, float):
             setattr(
                 self.options, opt,
                 self.settings.value('Options/' + opt,
                                     QtCore.QVariant(dflt)).toDouble()[0])
         elif isinstance(dflt, bool):
             setattr(
                 self.options, opt,
                 self.settings.value('Options/' + opt,
                                     QtCore.QVariant(dflt)).toBool())
         elif isinstance(dflt, int):
             setattr(
                 self.options, opt,
                 self.settings.value('Options/' + opt,
                                     QtCore.QVariant(dflt)).toInt()[0])
         else:
             raise ValueError('Unsupported type for option "%s": %r' % (opt, dflt))
Code Example #6
File: cnn.py  Project: AxelInd/DeepLearningLab_ANN
def get_estimator():
    opt = Options()

    model_dir = "./cnn_model/hist_len_{}_pob_siz_{}_cub_siz_{}".format(
        opt.hist_len, opt.pob_siz, opt.cub_siz)

    return tf.estimator.Estimator(model_fn=cnn.cnn_model_fn,
                                  model_dir=model_dir)
Code Example #7
def get_data(opt=Options()):
    sim = Simulator(opt.map_ind, opt.cub_siz, opt.pob_siz, opt.act_num)
    states = np.zeros([opt.data_steps, opt.state_siz], float)
    labels = np.zeros([opt.data_steps], int)

    # Note I am forcing the display to be off here to make data collection fast
    # you can turn it on again for debugging purposes
    # opt.disp_on = False

    # 1. control loop
    if opt.disp_on:
        win_all = None
        win_pob = None
    epi_step = 0  # #steps in current episode
    nepisodes = 1  # total #episodes executed

    state = sim.newGame(opt.tgt_y, opt.tgt_x)
    for step in range(opt.data_steps):
        if state.terminal or epi_step >= opt.early_stop:
            epi_step = 0
            nepisodes += 1
            state = sim.newGame(opt.tgt_y, opt.tgt_x)
        else:
            state = sim.step()  # will perform A* actions

        # save data & label
        states[step, :] = rgb2gray(state.pob).reshape(opt.state_siz)
        labels[step] = state.action

        epi_step += 1

        if step % opt.prog_freq == 0:
            print(step)

        if opt.disp_on:
            # pylab is imported lazily so data collection can run headless
            import pylab as pl
            if win_all is None:
                pl.figure()
                win_all = pl.imshow(state.screen)
                pl.figure()
                win_pob = pl.imshow(state.pob)
            else:
                win_all.set_data(state.screen)
                win_pob.set_data(state.pob)
            pl.pause(opt.disp_interval)
            pl.draw()

    # 2. save to disk
    print('saving data ...')
    np.savetxt(opt.states_fil, states, delimiter=',')
    np.savetxt(opt.labels_fil, labels, delimiter=',')
    print("states saved to " + opt.states_fil)
    print("labels saved to " + opt.labels_fil)
Code Example #8
def main(unused_argv):
	opt = Options()
	sim = Simulator(opt.map_ind, opt.cub_siz, opt.pob_siz, opt.act_num)
	trans = TransitionTable(opt.state_siz, opt.act_num, opt.hist_len,
	                             opt.minibatch_size, opt.valid_size,
	                             opt.states_fil, opt.labels_fil)

	# 1. train
	######################################
	# TODO implement your training here!
	# you can get the full data from the transition table like this:
	#
	# # both train_data and valid_data contain tuples of images and labels
	train_data = trans.get_train()
	valid_data = trans.get_valid()

	samples_train_data = np.float32(train_data[0])
	labels_train_data = np.float32(train_data[1])
	unhotted_labels_train_data = unhot(labels_train_data)

	samples_valid_data = np.float32(valid_data[0])
	labels_valid_data = np.float32(valid_data[1])
	unhotted_labels_valid_data = unhot(labels_valid_data)

	print("Shape of samples_train_data {}".format(samples_train_data.shape))
	print("Shape of labels_train_data {}".format(labels_train_data.shape))
	print("Shape of unhotted_labels_train_data {}".format(unhotted_labels_train_data.shape))

	classifier = cnn.get_estimator()

	# Train the model
	train_input_fn = tf.estimator.inputs.numpy_input_fn(
	    x={"x": samples_train_data},
	    y=unhotted_labels_train_data,
	    batch_size=100,
	    num_epochs=None,
	    shuffle=True)

	classifier.train(
	    input_fn=train_input_fn,
	    steps=1000
	)

	eval_input_fn = tf.estimator.inputs.numpy_input_fn(
		x={"x": samples_valid_data},
		y=unhotted_labels_valid_data,
		num_epochs=1,
		shuffle=False
	)

	eval_results = classifier.evaluate(input_fn=eval_input_fn)
	print(eval_results)
Code Example #9
def model(input_shape, model_name='', visualize_model=False):
    opt = Options()

    model = Sequential()
    model.add(
        Convolution2D(input_shape=input_shape,
                      filters=default_n_filters,
                      kernel_size=default_kernel_size,
                      activation='relu',
                      padding='same',
                      name='ConvLayer1'))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dense(opt.act_num))
    model.compile(loss='mse', metrics=['acc'], optimizer=Adam(lr=1e-6))

    if visualize_model:
        print("Saving model diagram")
        plot_model(model,
                   to_file=opt.model_path.format(model_name),
                   show_shapes=True)
    return model
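
# Hypothetical usage of the builder above; the input shape is assembled the same
# way as in the CLI snippet later in this collection (image side length =
# opt.cub_siz * opt.pob_siz, channel depth = opt.hist_len).
if __name__ == '__main__':
    opt = Options()
    image_dimension = opt.cub_siz * opt.pob_siz
    agent = model(input_shape=(image_dimension, image_dimension, opt.hist_len),
                  model_name='example', visualize_model=False)
    agent.summary()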
Code Example #10
def plt_hist_len_results(plt_file_name = None, opt = Options()):
    mdl_name = "list_len_mdl.h5"
    hist_len = np.arange(start=1, stop=11)
    results_success_rate_map_zero = []
    results_astar_diff = []

    map_ind = opt.map_ind
    opt.map_ind = 0
    #opt.change_tgt = False
    get_data(opt)  # generate new data with map 0
    opt.map_ind = map_ind
    print("map used {}".format(opt.map_ind))
    #opt.change_tgt = True
    for l in hist_len:
        opt.hist_len = l
        print("start with opt.hist_len: {}".format(opt.hist_len))
        train_model(opt, mdl_name,epochs=EPOCHS)

        [success_rate, astar_diff] = test_model(opt,mdl_name)
        results_success_rate_map_zero.append(success_rate)
        results_astar_diff.append(astar_diff)

    f, axarr = plt.subplots(1,2)
    axarr[0].scatter(hist_len, results_success_rate_map_zero)
    # axarr[0].set_title('map 1')
    axarr[0].set_ylabel(r'success rate',usetex=True)

    axarr[1].scatter(hist_len, results_astar_diff)
    # axarr[1].set_title('map 1')
    axarr[1].set_ylabel(r'mean difference to astar in steps',usetex=True)

    axarr[1].set_xlabel(r'history length',usetex=True)
    axarr[0].set_xlabel(r'history length',usetex=True)

    # Fine-tune figure; widen the gap between the two side-by-side subplots.
    f.subplots_adjust(wspace=0.3)
    helper_save(plt_file_name)
Code Example #11
    def start_eval(self, speicherort, display):
        # 0. initialization
        self.opt = Options()
        sim = SimulatorDeterministicStart(self.opt.map_ind, self.opt.cub_siz,
                                          self.opt.pob_siz, self.opt.act_num)
        imported_meta = tf.train.import_meta_graph(speicherort)

        win_all = None
        win_pob = None

        def_graph = tf.get_default_graph()

        with tf.Session() as sess:
            with tf.variable_scope("new_testing_scope", reuse=tf.AUTO_REUSE):

                x = sess.graph.get_tensor_by_name('x:0')
                Q = tf.get_collection("Q")[0]

                imported_meta.restore(sess,
                                      tf.train.latest_checkpoint('./weights/'))

                maxlen = 100000

                # initialize the environment
                state = sim.newGame(self.opt.tgt_y, self.opt.tgt_x, 0)
                state_with_history = np.zeros(
                    (self.opt.hist_len, self.opt.state_siz))
                self.append_to_hist(
                    state_with_history,
                    rgb2gray(state.pob).reshape(self.opt.state_siz))
                next_state_with_history = np.copy(state_with_history)
                trans = TransitionTable(self.opt.state_siz, self.opt.act_num,
                                        self.opt.hist_len,
                                        self.opt.minibatch_size, maxlen)
                epi_step = 0

                episodes = 0

                solved_episodes = 0

                step_sum = 0
                # train for <steps> steps
                while True:

                    # goal check
                    if state.terminal or epi_step >= self.opt.early_stop:
                        if state.terminal:
                            solved_episodes += 1
                        episodes += 1
                        step_sum = step_sum + epi_step
                        epi_step = 0

                        # reset the game
                        try:
                            state = sim.newGame(self.opt.tgt_y, self.opt.tgt_x,
                                                episodes)
                        except Exception:
                            return (step_sum, solved_episodes)

                        # and reset the history
                        state_with_history[:] = 0
                        self.append_to_hist(
                            state_with_history,
                            rgb2gray(state.pob).reshape(self.opt.state_siz))
                        next_state_with_history = np.copy(state_with_history)

                        if display:
                            if win_all is None:
                                plt.subplot(121)
                                win_all = plt.imshow(state.screen)
                                plt.subplot(122)
                                win_pob = plt.imshow(state.pob)
                            else:
                                win_all.set_data(state.screen)
                                win_pob.set_data(state.pob)
                            plt.pause(self.opt.disp_interval)
                            plt.draw()

                    epi_step += 1

                    # format state for network input
                    input_reshaped = self.reshapeInputData(
                        state_with_history, 1)
                    # create batch of input state
                    input_batched = np.tile(input_reshaped,
                                            (self.opt.minibatch_size, 1, 1, 1))

                    ### take one action per step
                    qvalues = sess.run(Q, feed_dict={
                        x: input_batched
                    })[0]  # take the first batch entry
                    action = np.argmax(qvalues)
                    action_onehot = trans.one_hot_action(action)
                    # apply action
                    next_state = sim.step(action)
                    # append to history
                    self.append_to_hist(
                        next_state_with_history,
                        rgb2gray(next_state.pob).reshape(self.opt.state_siz))
                    # add to the transition table
                    trans.add(state_with_history.reshape(-1), action_onehot,
                              next_state_with_history.reshape(-1),
                              next_state.reward, next_state.terminal)
                    # mark next state as current state
                    state_with_history = np.copy(next_state_with_history)
                    state = next_state

                    if display:
                        if win_all is None:
                            plt.subplot(121)
                            win_all = plt.imshow(state.screen)
                            plt.subplot(122)
                            win_pob = plt.imshow(state.pob)
                        else:
                            win_all.set_data(state.screen)
                            win_pob.set_data(state.pob)
                        plt.pause(self.opt.disp_interval)
                        plt.draw()
Code Example #12
    for i in range(opt.minibatch_size):
        action = np.argmax(action_batch[i])

        Q[i, action] = ((1 - terminal_batch[i]) * opt.discount *
                        np.max(Q_s_next[i])) + reward_batch[i]
    # Train agent on new Q
    loss = agent.train_on_batch(state_batch, Q)
    return loss
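

# The loop above computes the standard one-step Q-learning target for the action
# that was actually taken: target = r + (1 - terminal) * discount * max_a' Q(s', a').
# Below is an equivalent vectorized sketch, shown only for illustration and not
# called anywhere in this example; it assumes the same array shapes as above.
def q_learning_targets(Q, Q_s_next, action_batch, reward_batch, terminal_batch, discount):
    actions = np.argmax(action_batch, axis=1)
    targets = reward_batch + (1 - terminal_batch) * discount * np.max(Q_s_next, axis=1)
    Q[np.arange(Q.shape[0]), actions] = targets
    return Q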


# --------------------------------------------------------
# CLI
# --------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("model", type=str, help="Name for run / model ")
opt = Options("")  # Temp for params
parser.add_argument(
    "-s",
    "--steps",
    help=
    "(Optional) Number of steps to train the model for. Default is 10 ** 6.",
    type=int,
    default=opt.steps)

args = parser.parse_args()
if args.model:
    model_name = args.model
else:
    model_name = "model" + date.today().strftime("%d_%B_%Y_%I_%M_%p")

# --------------------------------------------------------
Code Example #13
File: test_agent.py  Project: RedHeadM/dl_lab_2017
def test_model(opt = Options(),mdl_load_name='my_model.h5'):
    """validation to astar
        return [success_rate, astar_diff]
    """
    # 0. initialization

    sim = Simulator(opt.map_ind, opt.cub_siz, opt.pob_siz, opt.act_num)
    print(opt.state_siz)
    trans = TransitionTable(opt.state_siz, opt.act_num, opt.hist_len,
                                 opt.minibatch_size, opt.valid_size,
                                 opt.states_fil, opt.labels_fil)
    state_length = (opt.cub_siz*opt.pob_siz)
    state_history = np.zeros((1,state_length,state_length,opt.hist_len))
    # load the trained model
    model = load_model(mdl_load_name)

    # 1. control loop
    if opt.disp_on:
        win_all = None
        win_pob = None
    epi_step = 0    # #steps in current episode
    nepisodes = 0   # total #episodes executed
    nepisodes_solved = 0
    action = 0     # action to take given by the network

    # start a new game
    state = sim.newGame(opt.tgt_y, opt.tgt_x)
    astar_num_steps = get_astar_steps(copy.deepcopy(sim))

    astar_num_steps_arr = []
    agent_num_steps_arr = []

    for step in range(opt.eval_steps):

        # check if episode ended
        if state.terminal or epi_step >= opt.early_stop:
            if state.terminal:
                nepisodes_solved += 1
            print("astar_num_steps: {} agent steps: {} ".format(astar_num_steps,epi_step))
            astar_num_steps_arr.append(astar_num_steps)
            agent_num_steps_arr.append(epi_step)
            nepisodes += 1
            # start a new game
            state = sim.newGame(opt.tgt_y, opt.tgt_x)
            astar_num_steps = get_astar_steps(copy.deepcopy(sim))

            epi_step = 0
        else:
            #   here you would let your agent take its action
            gray_state = rgb2gray(state.pob)
            gray_state = gray_state.reshape(1,opt.state_siz)
            trans.add_recent(step, gray_state)
            recent = trans.get_recent()
            recent_shaped = recent.reshape(1,state_length,state_length,opt.hist_len)
            action = np.argmax(model.predict(recent_shaped))
            state = sim.step(action)

            epi_step += 1

        if step % opt.prog_freq == 0:
            print("step {}".format(step))

        if opt.disp_on:
            if win_all is None:
                plt.subplot(121)
                win_all = plt.imshow(state.screen)
                plt.subplot(122)
                win_pob = plt.imshow(state.pob)
            else:
                win_all.set_data(state.screen)
                win_pob.set_data(state.pob)
            plt.pause(opt.disp_interval)
            plt.draw()

    # 2. calculate statistics
    success_rate = float(nepisodes_solved) / float(nepisodes)
    print("this session was: {}".format(success_rate))
    # 3. additional analysis

    agent_num_steps_arr=np.array(agent_num_steps_arr)
    astar_num_steps_arr=np.array(astar_num_steps_arr)
    astar_num_steps_arr[astar_num_steps_arr == None] = 0  # set to zero if the start was already on the goal
    # only compute the mean difference to A* where the goal was found

    print("sahpe form ",astar_num_steps_arr.shape)
    astar_num_steps_arr = astar_num_steps_arr[agent_num_steps_arr< opt.early_stop]
    print("sahpe to",astar_num_steps_arr.shape)
    #change after astar_num_steps_arr
    agent_num_steps_arr = agent_num_steps_arr[agent_num_steps_arr< opt.early_stop]
    astar_diff = np.mean(agent_num_steps_arr-astar_num_steps_arr)
    print("avg diff to astar: {}".format(astar_diff))
    return [success_rate, astar_diff]
Code Example #14
def train():
    if mx.context.num_gpus() > 0:
        ctx = mx.gpu()
    else:
        raise RuntimeError('There is no GPU device!')

    # loading configs
    args = Options().parse()
    cfg = Configs(args.config_path)
    # set logging level
    logging.basicConfig(level=logging.INFO)
    # set random seed
    np.random.seed(cfg.seed)

    # build dataset and loader
    content_dataset = ImageFolder(cfg.content_dataset, cfg.img_size, ctx=ctx)
    style_dataset = StyleLoader(cfg.style_dataset, cfg.style_size, ctx=ctx)
    content_loader = gluon.data.DataLoader(content_dataset, batch_size=cfg.batch_size, \
                                            last_batch='discard')

    vgg = Vgg16()
    vgg._init_weights(fixed=True, pretrain_path=cfg.vgg_check_point, ctx=ctx)

    style_model = Net(ngf=cfg.ngf)
    if cfg.resume is not None:
        print("Resuming from {} ...".format(cfg.resume))
        style_model.collect_params().load(cfg.resume, ctx=ctx)
    else:
        style_model.initialize(mx.initializer.MSRAPrelu(), ctx=ctx)
    print("Style model:")
    print(style_model)

    # build trainer
    lr_sche = mx.lr_scheduler.FactorScheduler(
        step=170000,
        factor=0.1,
        base_lr=cfg.base_lr
        #warmup_begin_lr=cfg.base_lr/3.0,
        #warmup_steps=300,
    )
    opt = mx.optimizer.Optimizer.create_optimizer('adam', lr_scheduler=lr_sche)
    trainer = gluon.Trainer(style_model.collect_params(), optimizer=opt)

    loss_fn = gluon.loss.L2Loss()

    logging.info("Start training with total {} epoch".format(cfg.total_epoch))
    iteration = 0
    total_time = 0.0
    num_batch = content_loader.__len__() * cfg.total_epoch
    for epoch in range(cfg.total_epoch):
        sum_content_loss = 0.0
        sum_style_loss = 0.0
        for batch_id, content_imgs in enumerate(content_loader):
            iteration += 1
            s = time.time()
            style_image = style_dataset.get(batch_id)

            style_vgg_input = subtract_imagenet_mean_preprocess_batch(
                style_image.copy())
            style_image = preprocess_batch(style_image)
            style_features = vgg(style_vgg_input)
            style_features = [
                style_model.gram.gram_matrix(mx.nd, f) for f in style_features
            ]

            content_vgg_input = subtract_imagenet_mean_preprocess_batch(
                content_imgs.copy())
            content_features = vgg(content_vgg_input)[1]

            with autograd.record():
                y = style_model(content_imgs, style_image)
                y = subtract_imagenet_mean_batch(y)
                y_features = vgg(y)

                content_loss = 2 * cfg.content_weight * loss_fn(
                    y_features[1], content_features)
                style_loss = 0.0
                for m in range(len(y_features)):
                    gram_y = style_model.gram.gram_matrix(mx.nd, y_features[m])
                    _, C, _ = style_features[m].shape
                    gram_s = mx.nd.expand_dims(style_features[m],
                                               0).broadcast_to((
                                                   gram_y.shape[0],
                                                   1,
                                                   C,
                                                   C,
                                               ))
                    style_loss = style_loss + 2 * cfg.style_weight * loss_fn(
                        gram_y, gram_s)
                total_loss = content_loss + style_loss
                total_loss.backward()

            trainer.step(cfg.batch_size)
            mx.nd.waitall()
            e = time.time()
            total_time += e - s
            sum_content_loss += content_loss[0]
            sum_style_loss += style_loss[0]
            if iteration % cfg.log_interval == 0:
                itera_sec = total_time / iteration
                eta_str = str(
                    datetime.timedelta(seconds=int((num_batch - iteration) *
                                                   itera_sec)))
                mesg = "{} Epoch [{}]:\t[{}/{}]\tTime:{:.2f}s\tETA:{}\tlr:{:.4f}\tcontent: {:.3f}\tstyle: {:.3f}\ttotal: {:.3f}".format(
                    time.strftime("%H:%M:%S",
                                  time.localtime()), epoch + 1, batch_id + 1,
                    content_loader.__len__(), itera_sec, eta_str,
                    trainer.optimizer.learning_rate,
                    sum_content_loss.asnumpy()[0] / (batch_id + 1),
                    sum_style_loss.asnumpy()[0] / (batch_id + 1),
                    (sum_content_loss + sum_style_loss).asnumpy()[0] /
                    (batch_id + 1))
                logging.info(mesg)
                ctx.empty_cache()
        save_model_filename = "Epoch_" + str(epoch + 1) +  "_" + str(time.ctime()).replace(' ', '_') + \
                "_" + str(cfg.content_weight) + "_" + str(cfg.style_weight) + ".params"
        if not os.path.isdir(cfg.save_model_dir):
            os.mkdir(cfg.save_model_dir)
        save_model_path = os.path.join(cfg.save_model_dir, save_model_filename)
        logging.info("Saving parameters to {}".format(save_model_path))
        style_model.collect_params().save(save_model_path)
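

# Reference note (not code from the project): the style loss above is built on Gram
# matrices of VGG feature maps. A plain NumPy version with one common normalization
# is sketched below; the project computes it via style_model.gram.gram_matrix, whose
# normalization may differ.
def gram_matrix_np(feat):
    """feat: array of shape (C, H, W) -> (C, C) Gram matrix."""
    C, H, W = feat.shape
    F = feat.reshape(C, H * W)
    return F.dot(F.T) / (C * H * W)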
Code Example #15
            train(model, criterion, update_optimizer, pos_data, neg_data, opts.maxiter_update, opts)

        torch.cuda.empty_cache()


if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--seq', default='./datasets/DragonBaby', help='input seq')

    args = parser.parse_args()

    np.random.seed(0)
    torch.manual_seed(0)

    options = Options()
    dataset = Path(args.seq)

    images = list(sorted(dataset.joinpath('img').glob('*.jpg')))
    ground_truths = pd.read_csv(str(dataset.joinpath('groundtruth_rect.txt')), header=None).values

    # Run tracker
    for i, (result, (x, y, w, h), overlap, score) in \
            enumerate(main(images, ground_truths[0], ground_truths, options), 1):
        image = np.asarray(Image.open(images[i]).convert('RGB'))

        print(i, result)

        gx, gy, gw, gh = ground_truths[i]
        cv2.rectangle(image, (int(gx), int(gy)), (int(gx+gw), int(gy+gh)), (0, 255, 0), 2)
        cv2.rectangle(image, (int(x), int(y)), (int(x+w), int(y+h)), (255, 0, 0), 2)
Code Example #16
File: cnn.py  Project: AxelInd/DeepLearningLab_ANN
def cnn_model_fn(features, labels, mode):
    """Model function for CNN."""
    # Input Layer
    # HARDCODED!!!!

    opt = Options()
    depth = opt.hist_len
    input_size = np.int32(np.sqrt(opt.state_siz))
    pool_size = 2
    stride_size = 2

    # print("Input size: {}".format(input_size))

    # (batch, depth, height, width, channels)
    input_layer = tf.reshape(features["x"],
                             [-1, depth, input_size, input_size, 1])

    # print("Shape of input: {}".format(input_layer.shape))

    # Convolutional Layer #1
    conv1 = tf.layers.conv3d(inputs=input_layer,
                             filters=32,
                             kernel_size=[5, 5, 5],
                             padding="same",
                             activation=tf.nn.relu)

    # print("Shape of conv1: {}".format(conv1))

    # Pooling Layer #1
    pool1 = tf.layers.max_pooling3d(inputs=conv1,
                                    pool_size=[1, pool_size, pool_size],
                                    strides=(1, stride_size, stride_size))

    # print("Shape of pool1: {}".format(pool1))

    # Convolutional Layer #2 and Pooling Layer #2
    conv2 = tf.layers.conv3d(inputs=pool1,
                             filters=64,
                             kernel_size=[5, 5, 5],
                             padding="same",
                             activation=tf.nn.relu)

    # print("Shape of conv2: {}".format(conv2))

    pool2 = tf.layers.max_pooling3d(inputs=conv2,
                                    pool_size=[depth, 2, 2],
                                    strides=(depth, 2, 2))

    # print("Shape of pool2: {}".format(pool2))

    size_input_after_two_pools = input_size // (2 * pool_size)

    # Dense Layer
    pool2_flat = tf.reshape(
        pool2,
        [-1, size_input_after_two_pools * size_input_after_two_pools * 64])

    # print("Shape of pool2_flat: {}".format(pool2_flat))

    dense = tf.layers.dense(inputs=pool2_flat,
                            units=1024,
                            activation=tf.nn.relu)

    # print("Shape of dense: {}".format(dense))

    dropout = tf.layers.dropout(inputs=dense,
                                rate=0.4,
                                training=mode == tf.estimator.ModeKeys.TRAIN)

    # print("Shape of dropout: {}".format(dropout))

    # Logits Layer
    logits = tf.layers.dense(inputs=dropout, units=5)

    # print("Shape of logits: {}".format(logits))

    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "classes": tf.argmax(input=logits, axis=1),
        # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
        # `logging_hook`.
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate Loss (for both TRAIN and EVAL modes)
    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=5)

    loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels,
                                           logits=logits)

    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(loss=loss,
                                      global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=loss,
                                          train_op=train_op)

    # Add evaluation metrics (for EVAL mode)
    eval_metric_ops = {
        "accuracy":
        tf.metrics.accuracy(labels=labels, predictions=predictions["classes"])
    }
    return tf.estimator.EstimatorSpec(mode=mode,
                                      loss=loss,
                                      eval_metric_ops=eval_metric_ops)
Code Example #17
def main(unused_argv):
    classifier = cnn.get_estimator()

    # 0. initialization
    opt = Options()
    sim = Simulator(opt.map_ind, opt.cub_siz, opt.pob_siz, opt.act_num)

    # TODO: load your agent
    # Hint: If using standard tensorflow api it helps to write your own model.py
    # file with the network configuration, including a function model.load().
    # You can use saver = tf.train.Saver() and saver.restore(sess, filename_cpkt)

    agent = None

    # 1. control loop
    if opt.disp_on:
        win_all = None
        win_pob = None
    epi_step = 0  # #steps in current episode
    nepisodes = 0  # total #episodes executed
    nepisodes_solved = 0
    action = 0  # action to take given by the network

    # start a new game
    state = sim.newGame(opt.tgt_y, opt.tgt_x)
    for step in range(opt.eval_steps):

        # check if episode ended
        if state.terminal or epi_step >= opt.early_stop:
            epi_step = 0
            nepisodes += 1
            if state.terminal:
                nepisodes_solved += 1
            # start a new game
            state = sim.newGame(opt.tgt_y, opt.tgt_x)
        else:
            #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
            # TODO: here you would let your agent take its action
            #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
            # Hint: get the image using rgb2gray(state.pob), append latest image to a history
            # this just gets a random action
            # action = randrange(opt.act_num)

            image = rgb2gray(state.pob)
            reshaped_image = np.zeros(opt.state_siz)
            flattened_image = image.flatten()[0:opt.state_siz]
            reshaped_image[0:flattened_image.shape[0]] = flattened_image
            reshaped_image = reshaped_image.reshape(1, opt.state_siz)

            if epi_step == 0:
                images_history = np.zeros([opt.hist_len, opt.state_siz])
                for j in range(opt.hist_len):
                    images_history[j] = reshaped_image
            else:
                images_history = np.append(images_history, reshaped_image, 0)
                if images_history.shape[0] > opt.hist_len:
                    images_history = np.delete(images_history, 0, 0)

            images_history_flatten = np.float32(
                images_history.reshape(1, opt.state_siz * opt.hist_len))

            predict_input_fn = tf.estimator.inputs.numpy_input_fn(
                x={"x": images_history_flatten}, num_epochs=1, shuffle=False)

            predictions = classifier.predict(input_fn=predict_input_fn)
            for i, p in enumerate(predictions):
                action_predicted = p["classes"]
                break

            state = sim.step(action_predicted)

            epi_step += 1

        if state.terminal or epi_step >= opt.early_stop:
            epi_step = 0
            nepisodes += 1
            if state.terminal:
                nepisodes_solved += 1
            # start a new game
            state = sim.newGame(opt.tgt_y, opt.tgt_x)

        if step % opt.prog_freq == 0:
            print(step)

        if opt.disp_on:
            if win_all is None:
                plt.subplot(121)
                win_all = plt.imshow(state.screen)
                plt.subplot(122)
                win_pob = plt.imshow(state.pob)
            else:
                win_all.set_data(state.screen)
                win_pob.set_data(state.pob)
            plt.pause(opt.disp_interval)
            plt.draw()

    # 2. calculate statistics
    print("Result for history length: {}".format(opt.hist_len))
    print(float(nepisodes_solved) / float(nepisodes))
Code Example #18
def train_model(opt=Options(), save_mdl_name='my_model.h5', epochs=10):
    sim = Simulator(opt.map_ind, opt.cub_siz, opt.pob_siz, opt.act_num)
    trans = TransitionTable(opt.state_siz, opt.act_num, opt.hist_len,
                            opt.minibatch_size, opt.valid_size, opt.states_fil,
                            opt.labels_fil)

    # 1. train
    [train_states, train_labels] = trans.get_train()
    [valid_states, valid_labels] = trans.get_valid()
    print("train data shape {}", train_states.shape)
    print("train data shape {}", train_labels.shape)

    print("valid data shape {}", valid_states.shape)
    print("valid data shape {}", valid_labels.shape)

    train_shaped = train_states.reshape(train_states.shape[0],
                                        opt.cub_siz * opt.pob_siz,
                                        opt.cub_siz * opt.pob_siz,
                                        opt.hist_len)
    valid_shaped = valid_states.reshape(valid_states.shape[0],
                                        opt.cub_siz * opt.pob_siz,
                                        opt.cub_siz * opt.pob_siz,
                                        opt.hist_len)

    #train_shaped = tf.reshape(train_states, [-1,25, 25, 4])
    train_shaped = train_shaped.astype('float32')
    valid_shaped = valid_shaped.astype('float32')
    num_classes = 5

    input_shape = (opt.cub_siz * opt.pob_siz, opt.cub_siz * opt.pob_siz,
                   opt.hist_len)

    # print(train_shaped.shape)

    class AccuracyHistory(keras.callbacks.Callback):
        def on_train_begin(self, logs={}):
            self.acc = []

        def on_epoch_end(self, batch, logs={}):
            self.acc.append(logs.get('acc'))

    history = AccuracyHistory()

    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               strides=(2, 2),
               activation='relu',
               input_shape=input_shape))
    #model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    #model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1000, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))

    #keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    #model.compile(loss=keras.losses.categorical_crossentropy,
    #              optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),
    #              metrics=['accuracy'])
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.SGD(lr=0.001),
                  metrics=['accuracy'])

    model.fit(train_shaped,
              train_labels,
              batch_size=trans.minibatch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(valid_shaped, valid_labels),
              callbacks=[history])

    # 2. save your trained model
    model.save(save_mdl_name)
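

# Typical call sequence, mirroring the plotting helpers earlier in this collection
# (get_data and test_model are shown in Code Examples #7 and #13); this is only a
# usage sketch and assumes those functions are importable in the same module.
if __name__ == '__main__':
    opt = Options()
    get_data(opt)  # regenerate the states/labels CSVs for the current map
    train_model(opt, save_mdl_name='my_model.h5', epochs=10)
    success_rate, astar_diff = test_model(opt, 'my_model.h5')
    print("success rate: {}, mean diff to astar: {}".format(success_rate, astar_diff))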
Code Example #19
    "--steps",
    help="(Optional) Number of steps to train the model for. Default is 1000",
    type=int,
    default=1000)
parser.add_argument("-ss",
                    "--silent",
                    help="Runs test without displaying the simulation",
                    action="store_false")

args = parser.parse_args()

model_path = args.model

# --------------------------------------------------------
# 0. initialization
opt = Options()
sim = Simulator(opt.map_ind, opt.cub_siz, opt.pob_siz, opt.act_num)

opt.disp_on = args.silent

if args.steps:
    opt.steps = args.steps

# --------------------------------------------------------
# Input layer
image_dimension = opt.cub_siz * opt.pob_siz
img_rows = img_cols = image_dimension
input_shape = [img_rows, img_cols, opt.hist_len]

# --------------------------------------------------------
# Model
Code Example #20
File: cnn.py  Project: AxelInd/DeepLearningLab_ANN
def get_network_for_input_raw(state_batch):
  opt = Options()
  depth = opt.hist_len
  input_size = np.int32(np.sqrt(opt.state_siz))
  pool_size = 2
  stride_size = 2

  with tf.variable_scope("DQN", reuse=tf.AUTO_REUSE):
    # print("Input size: {}".format(input_size))

    # (batch, depth, height, width, channels)
    input_layer = tf.reshape(state_batch, [-1, depth, input_size, input_size, 1])

    # print("Shape of state_batch: {}".format(input_layer.shape))

    # Convolutional Layer #1
    conv1 = tf.layers.conv3d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5, 5],
        padding="same",
        activation=tf.nn.relu)

    # print("Shape of conv1: {}".format(conv1))

    # Pooling Layer #1
    pool1 = tf.layers.max_pooling3d(
      inputs=conv1,
      pool_size=[1, pool_size, pool_size],
      strides=(1, stride_size, stride_size)
    )

    # print("Shape of pool1: {}".format(pool1))

    # Convolutional Layer #2 and Pooling Layer #2
    conv2 = tf.layers.conv3d(
        inputs=pool1,
        filters=32,
        kernel_size=[5, 5, 5],
        padding="same",
        activation=tf.nn.relu)

    # print("Shape of conv2: {}".format(conv2))

    pool2 = tf.layers.max_pooling3d(
      inputs=conv2,
      pool_size=[depth, 2, 2],
      strides=(depth, 2, 2)
    )

    # print("Shape of pool2: {}".format(pool2))

    size_input_after_two_pools = input_size // (2 * pool_size)

    # Dense Layer
    pool2_flat = tf.reshape(pool2, [-1, size_input_after_two_pools * size_input_after_two_pools * 32])

    # print("Shape of pool2_flat: {}".format(pool2_flat))

    dense = tf.layers.dense(inputs=pool2_flat, units=128, activation=tf.nn.relu)

    # dropout is only enabled for batches with more than one sample,
    # which is used here as a proxy for "training mode"
    if state_batch.shape[0] > 1:
      trainingMode = True
    else:
      trainingMode = False

    dropout = tf.layers.dropout(
      inputs=dense, rate=0.4,
      training=trainingMode
    )

    # print("Shape of dense: {}".format(dense))

    # Logits Layer
    q_s = tf.layers.dense(inputs=dropout, units=5)
    # print("Shape of q_s: {}".format(q_s))

    return q_s
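

# Hypothetical wiring sketch: the placeholder name 'x' and the "Q" collection match
# what the evaluation code in Code Example #11 looks up; the flat input shape
# (hist_len * state_siz per sample) is an assumption, not taken from the project.
def build_q_graph():
  opt = Options()
  x = tf.placeholder(tf.float32,
                     shape=[None, opt.hist_len * opt.state_siz], name='x')
  Q = get_network_for_input_raw(x)
  tf.add_to_collection("Q", Q)
  return x, Q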