Example no. 1
 def create_model(self, charset=None):
     return model.Model(self.num_char_classes,
                        self.seq_length,
                        num_views=4,
                        null_code=62,
                        charset=charset)
Example no. 2
 def setUp(self):
     self.model = model.Model(0.1, 0.1)
Example no. 3
import torch

import utility
import data
import model
import loss
from option import args
from trainer import Trainer

torch.manual_seed(args.seed)
checkpoint = utility.checkpoint(args)

if checkpoint.ok:
    loader = data.Data(args)
    model = model.Model(args, checkpoint)
    loss = loss.Loss(args, checkpoint) if not args.test_only else None
    t = Trainer(args, loader, model, loss, checkpoint)
    while not t.terminate():
        t.train()
        t.test()

    checkpoint.done()


Example no. 4
def modelFromFile(filename):
    # Load the pickled learned configuration and wrap it in a fresh Model.
    with open(filename, 'rb') as f:
        learned_list = pickle.load(f)
    mod = model.Model()
    mod.learned_config = learned_list
    return mod
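For context, here is a minimal sketch of the save step this loader pairs with; only the `learned_config` attribute and the pickle layout come from the snippet above, and the helper name `modelToFile` is hypothetical:

import pickle

def modelToFile(mod, filename):
    # Persist just the learned configuration that modelFromFile reads back.
    with open(filename, 'wb') as f:
        pickle.dump(mod.learned_config, f)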
Example no. 5
def create_model(*args, **kwargs):
    ocr_model = model.Model(mparams=create_mparams(), *args, **kwargs)
    return ocr_model
Example no. 6
    print 'song file:', songpath
    dictpath = sys.argv[2]
    print 'codebook file:', dictpath

    # feats
    feats = FEATS.features_from_matfile(songpath,
                                        pSize=pSize,
                                        usebars=usebars,
                                        keyInv=keyInv,
                                        songKeyInv=songKeyInv,
                                        positive=positive,
                                        do_resample=do_resample)
    # model
    mat = scipy.io.loadmat(dictpath)
    codebook = mat['codebook']
    model = MODEL.Model(codebook)

    # predict
    best_code_per_pattern, avg_dist = model.predicts(feats)

    # report distortion (per... pixel? patch point?)
    print 'average distortion:', np.average(avg_dist)

    # build original and encoding
    patch_len = codebook.shape[1] / 12
    btchroma = np.concatenate([c.reshape(12, patch_len) for c in feats],
                              axis=1)
    btchroma_encoded = np.concatenate([
        codebook[int(k)].reshape(12, patch_len) for k in best_code_per_pattern
    ],
                                      axis=1)
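For intuition, each codebook row is a flattened 12 x patch_len chroma patch, and the encoding swaps every input patch for its chosen codeword before stitching the patches back together along the time axis. A toy sketch of that reshaping logic (shapes and values below are made up):

import numpy as np

patch_len = 4                                  # codebook.shape[1] == 12 * patch_len
codebook = np.random.rand(8, 12 * patch_len)   # 8 codewords
best_code_per_pattern = [3, 0, 5]              # one codeword index per input patch
encoded = np.concatenate(
    [codebook[int(k)].reshape(12, patch_len) for k in best_code_per_pattern],
    axis=1)                                    # shape (12, 3 * patch_len)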
Example no. 7
    comment_pos = None
    equal_sign_pos = None
    line_end_pos = None
    line_start_pos = loc
    while str_text[loc] != '\n':
        if str_text[loc] == '#':
            comment_pos = loc
        elif str_text[loc] == '=':
            equal_sign_pos = loc
        loc += 1
    line_end_pos = loc
    if comment_pos is None:
        comment_pos = line_end_pos
    text_to_be_feed = key_text + '=' + str(replace_value) + str_text[comment_pos:line_end_pos]
    str_text = str_text.replace(str_text[line_start_pos:line_end_pos], text_to_be_feed, 1)
    return str_text

if __name__=="__main__":
    mod = model.Model()
    config = io.Config()
    opt = diffev.DiffEv()
    io.load_gx(gx_file_path,mod,opt,config)
    mod.script=replace_script_section(mod.script,'running_mode','0')
    print('Simulate and dump files now!')
    mod.simulate()
    print('Plot files are dumpt to pocket!')
    print('Plot the results now!')
    plot_all(plot_e_model=plot_e_model,plot_e_FS=plot_e_FS,plot_ctr=plot_ctr,plot_raxr=plot_raxr,plot_AP_Q=plot_AP_Q)
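Roughly, replace_script_section rewrites one key=value line inside the script string while preserving any trailing '#' comment. A minimal illustration of the intended effect, assuming loc already points at the start of the target line (the sample text is made up):

script = "running_mode=1# 0: simulate, 1: fit\nbatch=8\n"
# replace_script_section(script, 'running_mode', '0') rebuilds the first line as
# 'running_mode=0# 0: simulate, 1: fit' and leaves the rest of the script unchanged.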
Example no. 8
def _run_inference(output_dir=None,
                   file_extension='png',
                   depth=True,
                   egomotion=False,
                   objmotion=False,
                   model_ckpt=None,
                   input_dir=None,
                   input_list_file=None,
                   batch_size=1,
                   img_height=128,
                   img_width=416,
                   seq_length=3,
                   architecture=nets.RESNET,
                   imagenet_norm=True,
                   use_skip=True,
                   joint_encoder=True,
                   shuffle=False,
                   flip_for_depth=False,
                   inference_mode=INFERENCE_MODE_SINGLE,
                   inference_crop=INFERENCE_CROP_NONE,
                   use_masks=False):
  """Runs inference. Refer to flags in inference.py for details."""
  inference_model = model.Model(is_training=False,
                                batch_size=batch_size,
                                img_height=img_height,
                                img_width=img_width,
                                seq_length=seq_length,
                                architecture=architecture,
                                imagenet_norm=imagenet_norm,
                                use_skip=use_skip,
                                joint_encoder=joint_encoder)

  global ego_prev_rotate1, ego_prev_rotate2, ego_prev_rotate3, ego_baru_x, ego_baru_y, write_x_ego, write_y_ego, img_array1, img_array2, max_width, image_con, total_height, img_array3

  vars_to_restore = util.get_vars_to_save_and_restore(model_ckpt)
  saver = tf.train.Saver(vars_to_restore)
  sv = tf.train.Supervisor(logdir='/tmp/', saver=None)
  with sv.managed_session() as sess:
    saver.restore(sess, model_ckpt)
    if not gfile.Exists(output_dir):
      gfile.MakeDirs(output_dir)
    logging.info('Predictions will be saved in %s.', output_dir)

    # Collect all images to run inference on.
    im_files, basepath_in = collect_input_images(input_dir, input_list_file,
                                                 file_extension)
    if shuffle:
      logging.info('Shuffling data...')
      np.random.shuffle(im_files)
    logging.info('Running inference on %d files.', len(im_files))

    # Create missing output folders and pre-compute target directories.
    output_dirs = create_output_dirs(im_files, basepath_in, output_dir)

    # Run depth prediction network.
    if depth:
      im_batch = []
      for i in range(len(im_files)):
        if i % 100 == 0:
          logging.info('%s of %s files processed.', i, len(im_files))

        # Read image and run inference.
        if inference_mode == INFERENCE_MODE_SINGLE:
          if inference_crop == INFERENCE_CROP_NONE:
            im = util.load_image(im_files[i], resize=(img_width, img_height))
          elif inference_crop == INFERENCE_CROP_CITYSCAPES:
            im = util.crop_cityscapes(util.load_image(im_files[i]),
                                      resize=(img_width, img_height))
        elif inference_mode == INFERENCE_MODE_TRIPLETS:
          im = util.load_image(im_files[i], resize=(img_width * 3, img_height))
          im = im[:, img_width:img_width*2]
        if flip_for_depth:
          im = np.flip(im, axis=1)
        im_batch.append(im)

        if len(im_batch) == batch_size or i == len(im_files) - 1:
          # Call inference on batch.
          for _ in range(batch_size - len(im_batch)):  # Fill up batch.
            im_batch.append(np.zeros(shape=(img_height, img_width, 3),
                                     dtype=np.float32))
          im_batch = np.stack(im_batch, axis=0)
          est_depth = inference_model.inference_depth(im_batch, sess)
          if flip_for_depth:
            est_depth = np.flip(est_depth, axis=2)
            im_batch = np.flip(im_batch, axis=2)

          for j in range(len(im_batch)):
            color_map = util.normalize_depth_for_display(
                np.squeeze(est_depth[j]))
            visualization = np.concatenate((im_batch[j], color_map), axis=0)
            # Save raw prediction and color visualization. Extract filename
            # without extension from full path: e.g. path/to/input_dir/folder1/
            # file1.png -> file1
            k = i - len(im_batch) + 1 + j
            filename_root = os.path.splitext(os.path.basename(im_files[k]))[0]
            pref = '_flip' if flip_for_depth else ''
            output_raw = os.path.join(
                output_dirs[k], filename_root + pref + '.npy')
            output_vis = os.path.join(
                output_dirs[k], filename_root + pref + '.png')
            with gfile.Open(output_raw, 'wb') as f:
              np.save(f, est_depth[j])
            util.save_image(output_vis, visualization, file_extension)
          im_batch = []

    if objmotion:
      print("asem")

    # Run egomotion network.
    if egomotion:
      if inference_mode == INFERENCE_MODE_SINGLE:
        # Run regular egomotion inference loop.
        input_image_seq = []
        input_seg_seq = []
        current_sequence_dir = None
        current_output_handle = None
        current_output_handle2 = None
        for i in range(len(im_files)):
          sequence_dir = os.path.dirname(im_files[i])
          if sequence_dir != current_sequence_dir:
            # Assume start of a new sequence, since this image lies in a
            # different directory than the previous ones.
            # Clear egomotion input buffer.
            output_filepath = os.path.join(output_dirs[i], 'totalmotion.txt')
            output_filepath2 = os.path.join(output_dirs[i], 'objmotion.txt')
            if current_output_handle is not None:
              current_output_handle.close()

            if current_output_handle2 is not None:
              current_output_handle2.close()

            current_sequence_dir = sequence_dir
            logging.info('Writing egomotion sequence to %s.', output_filepath)
            logging.info('Writing objmotion sequence to %s.', output_filepath2)
            current_output_handle = gfile.Open(output_filepath, 'w')
            current_output_handle2 = gfile.Open(output_filepath2, 'w')
            input_image_seq = []
          im = util.load_image(im_files[i], resize=(img_width, img_height))
          input_image_seq.append(im)
          if use_masks:
            im_seg_path = im_files[i].replace('.%s' % file_extension,
                                              '-seg.%s' % file_extension)
            if not gfile.Exists(im_seg_path):
              raise ValueError('No segmentation mask %s has been found for '
                               'image %s. If none are available, disable '
                               'use_masks.' % (im_seg_path, im_files[i]))
            input_seg_seq.append(util.load_image(im_seg_path,
                                                 resize=(img_width, img_height),
                                                 interpolation='nn'))

          if len(input_image_seq) < seq_length:  # Buffer not filled yet.
            continue
          if len(input_image_seq) > seq_length:  # Remove oldest entry.
            del input_image_seq[0]
            if use_masks:
              del input_seg_seq[0]

          input_image_stack = np.concatenate(input_image_seq, axis=2)
          input_image_stack = np.expand_dims(input_image_stack, axis=0)
          if use_masks:
            input_image_stack = mask_image_stack(input_image_stack,
                                                 input_seg_seq)

          # print('')
          # print('wenaknoooooooooooooooooooooooooo')
          # print('')

          est_egomotion = np.squeeze(inference_model.inference_egomotion(
              input_image_stack, sess))

          #####################################################

          print('')
          print('est_egomotion1 = ', est_egomotion)
          print('est_egomotion2 = ', est_egomotion[0])
          print('est_egomotion3 = ', est_egomotion[0][0])
          print('est_egomotion4 = ', est_egomotion[0][5])
          #a

          ego_x_prev = float(est_egomotion[0][0])*scaling
          ego_y_prev = float(est_egomotion[0][1])*scaling
          ego_z_prev = float(est_egomotion[0][2])*scaling

          ego_rot1_prev = float(est_egomotion[0][3])
          ego_rot2_prev = float(est_egomotion[0][4])
          ego_rot3_prev = float(est_egomotion[0][5])

          ego_prev_rotate1 = ego_prev_rotate1 + ego_rot1_prev
          ego_prev_rotate2 = ego_prev_rotate2 + ego_rot2_prev
          ego_prev_rotate3 = ego_prev_rotate3 + ego_rot3_prev

          ego_coorY_prev = (
              ((np.cos(ego_prev_rotate2) * np.cos(ego_prev_rotate3))
               - (np.sin(ego_prev_rotate1) * np.sin(ego_prev_rotate2) * np.sin(ego_prev_rotate3))) * ego_x_prev
              - (np.cos(ego_prev_rotate1) * np.sin(ego_prev_rotate3)) * ego_y_prev
              + ((np.sin(ego_prev_rotate2) * np.cos(ego_prev_rotate3))
                 + (np.sin(ego_prev_rotate1) * np.cos(ego_prev_rotate2) * np.sin(ego_prev_rotate3))) * ego_z_prev)
          ego_coorX_prev = (
              (np.cos(ego_prev_rotate1) * np.sin(ego_prev_rotate2)) * ego_x_prev
              + np.sin(ego_prev_rotate1) * ego_y_prev
              + (np.cos(ego_prev_rotate1) * np.cos(ego_prev_rotate2)) * ego_z_prev)
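          # Note (an interpretation, not from the original source): the two
          # expressions above apply rows of an Euler-angle rotation matrix,
          # built from the accumulated ego_prev_rotate1..3 angles, to the
          # per-frame translation, projecting the motion onto the ground
          # plane for the 2-D trajectory drawn below.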

          ego_prev_x, ego_prev_y = ego_coorY_prev*scale, -ego_coorX_prev*scale

          ego_baru_x = ego_baru_x + (-ego_prev_x*1.5)
          ego_baru_y = ego_baru_y + (-ego_prev_y*1.5)
          
          write_x_ego = 640 - ego_baru_x
          write_y_ego = 900 - ego_baru_y

          cv2.circle(traj_new, (int(write_x_ego), int(write_y_ego)) ,1, (0,255,0), 2)
          cv2.circle(traj_new, (int(write_x_ego-14), int(write_y_ego+3)) ,1, (255,255,255), 3)
          cv2.circle(traj_new, (int(write_x_ego+14), int(write_y_ego+3)) ,1, (255,255,255), 3)

          traj_viz = traj_new.copy()

          cv2.rectangle(traj_viz, (int(write_x_ego)-7,int(write_y_ego)-14), (int(write_x_ego)+7,int(write_y_ego)+14), (0,255,0), 2)

          # Read image
          print('im_files[i] = ', im_files[i])
          imgg = cv2.imread(im_files[i])
          img_array1.append(imgg)
          height1, width1, layers1 = imgg.shape
          size1 = (width1,height1)

          img_array2.append(traj_viz)
          height2, width2, layers2 = traj_viz.shape
          size2 = (width2,height2)

          image_con = []
          max_width = 0
          total_height = 0

          image_con.append(traj_viz)
          if image_con[-1].shape[1] > max_width:
              max_width = image_con[-1].shape[1]
          total_height += image_con[-1].shape[0]

          image_con.append(imgg)
          if image_con[-1].shape[1] > max_width:
              max_width = image_con[-1].shape[1]
          total_height += image_con[-1].shape[0]

          final_image = np.zeros((total_height,max_width,3),dtype=np.uint8)

          current_y = 0 # keep track of where your current image was last placed in the y coordinate
          for image in image_con:
            # add an image to the final array and increment the y coordinate
            final_image[current_y:image.shape[0]+current_y,:image.shape[1],:] = image
            current_y += image.shape[0]

          img_array3.append(final_image)
          height3, width3, layers3 = final_image.shape
          size3 = (width3,height3)

          #cv2.imshow('moms', final_image)
          #cv2.imwrite("a.jpg", final_image)

          #cv2.imshow('img', imgg)
          cv2.imshow('ngaplo', traj_viz)
          cv2.waitKey(1)


          #######################################################

          est_objectmotion = np.squeeze(inference_model.inference_objectmotion(
              input_image_stack, sess))


          egomotion_str = []
          objectmotion_str = []

          for j in range(seq_length - 1):
            egomotion_str.append(','.join([str(d) for d in est_egomotion[j]]))
            objectmotion_str.append(','.join([str(d) for d in est_objectmotion[j]]))

          current_output_handle.write(
              str(i) + ' ' + ' '.join(egomotion_str) +',' + ' '.join(objectmotion_str) + '\n')
          
          current_output_handle2.write(
              str(i) + ' ' + ' '.join(objectmotion_str) + '\n')

        if current_output_handle is not None:
          current_output_handle.close()

        if current_output_handle2 is not None:
          current_output_handle2.close()

        ### SAVE IMAGE TO VIDEO
        #out1 = cv2.VideoWriter('scene1.avi', cv2.VideoWriter_fourcc(*'DIVX'), 15, size1)
        #out2 = cv2.VideoWriter('result1.avi', cv2.VideoWriter_fourcc(*'DIVX'), 15, size2)
        out3 = cv2.VideoWriter('total1.avi', cv2.VideoWriter_fourcc(*'DIVX'), 15, size3)

        # for i in range(len(img_array1)):
        #   out1.write(img_array1[i])
        # out1.release()

        # for j in range(len(img_array2)):
        #   out2.write(img_array2[j])
        # out2.release()

        for j in range(len(img_array3)):
          out3.write(img_array3[j])
        out3.release()

      elif inference_mode == INFERENCE_MODE_TRIPLETS:
        print('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
        written_before = []
        for i in range(len(im_files)):
          im = util.load_image(im_files[i], resize=(img_width * 3, img_height))
          input_image_stack = np.concatenate(
              [im[:, :img_width], im[:, img_width:img_width*2],
               im[:, img_width*2:]], axis=2)
          input_image_stack = np.expand_dims(input_image_stack, axis=0)
          if use_masks:
            im_seg_path = im_files[i].replace('.%s' % file_extension,
                                              '-seg.%s' % file_extension)
            if not gfile.Exists(im_seg_path):
              raise ValueError('No segmentation mask %s has been found for '
                               'image %s. If none are available, disable '
                               'use_masks.' % (im_seg_path, im_files[i]))
            seg = util.load_image(im_seg_path,
                                  resize=(img_width * 3, img_height),
                                  interpolation='nn')
            input_seg_seq = [seg[:, :img_width], seg[:, img_width:img_width*2],
                             seg[:, img_width*2:]]
            input_image_stack = mask_image_stack(input_image_stack,
                                                 input_seg_seq)
          est_egomotion = inference_model.inference_egomotion(
              input_image_stack, sess)
          est_egomotion = np.squeeze(est_egomotion)
          egomotion_1_2 = ','.join([str(d) for d in est_egomotion[0]])
          egomotion_2_3 = ','.join([str(d) for d in est_egomotion[1]])

          output_filepath = os.path.join(output_dirs[i], 'egomotion.txt')
          file_mode = 'w' if output_filepath not in written_before else 'a'
          with gfile.Open(output_filepath, file_mode) as current_output_handle:
            current_output_handle.write(str(i) + ' ' + egomotion_1_2 + ' ' +
                                        egomotion_2_3 + '\n')
          written_before.append(output_filepath)
      #logging.info('Done.')

    elif objmotion:
      print('bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb')
      input_image_seq = []
      input_seg_seq = []
      current_sequence_dir = None
      current_output_handle = None
      for i in range(len(im_files)):
        sequence_dir = os.path.dirname(im_files[i])
        if sequence_dir != current_sequence_dir:
          output_filepath = os.path.join(output_dirs[i], 'objmotion.txt')
          if current_output_handle is not None:
            current_output_handle.close()
          current_sequence_dir = sequence_dir
          logging.info('Writing objmotion sequence to %s.', output_filepath)
          current_output_handle = gfile.Open(output_filepath, 'w')
          input_image_seq = []
        im = util.load_image(im_files[i], resize=(img_width, img_height))
        input_image_seq.append(im)
        if use_masks:
          im_seg_path = im_files[i].replace('.%s' % file_extension,
                                              '-seg.%s' % file_extension)
          if not gfile.Exists(im_seg_path):
            raise ValueError('No segmentation mask %s has been found for '
                              'image %s. If none are available, disable '
                              'use_masks.' % (im_seg_path, im_files[i]))
          input_seg_seq.append(util.load_image(im_seg_path,
                                                 resize=(img_width, img_height),
                                                 interpolation='nn'))

        if len(input_image_seq) < seq_length:  # Buffer not filled yet.
            continue
        if len(input_image_seq) > seq_length:  # Remove oldest entry.
          del input_image_seq[0]
          if use_masks:
            del input_seg_seq[0]

        input_image_stack = np.concatenate(input_image_seq, axis=2)
        input_image_stack = np.expand_dims(input_image_stack, axis=0)
        if use_masks:
          input_image_stack = mask_image_stack(input_image_stack,
                                                 input_seg_seq)

        est_objectmotion = np.squeeze(inference_model.inference_objectmotion(
              input_image_stack, sess))
Example no. 9
def main(_):
  # Fixed seed for repeatability
  seed = 8964
  tf.set_random_seed(seed)
  np.random.seed(seed)
  random.seed(seed)

  # if FLAGS.handle_motion and FLAGS.joint_encoder:
  #   raise ValueError('Using a joint encoder is currently not supported when '
  #                    'modeling object motion.')
  if FLAGS.handle_motion and FLAGS.seq_length != 3:
    raise ValueError('The current motion model implementation only supports '
                     'using a sequence length of three.')
  if FLAGS.handle_motion and not FLAGS.compute_minimum_loss:
    raise ValueError('Computing the minimum photometric loss is required when '
                     'enabling object motion handling.')
  if FLAGS.size_constraint_weight > 0 and not FLAGS.handle_motion:
    raise ValueError('To enforce object size constraints, enable motion '
                     'handling.')
  if FLAGS.imagenet_ckpt and not FLAGS.imagenet_norm:
    logging.warn('When initializing with an ImageNet-pretrained model, it is '
                 'recommended to normalize the image inputs accordingly using '
                 'imagenet_norm.')
  if FLAGS.compute_minimum_loss and FLAGS.seq_length % 2 != 1:
    raise ValueError('Compute minimum loss requires using an odd number of '
                     'images in a sequence.')
  if FLAGS.architecture != nets.RESNET and FLAGS.imagenet_ckpt:
    raise ValueError('Can only load weights from pre-trained ImageNet model '
                     'when using ResNet-architecture.')
  if FLAGS.compute_minimum_loss and FLAGS.exhaustive_mode:
    raise ValueError('Exhaustive mode has no effect when compute_minimum_loss '
                     'is enabled.')
  if FLAGS.img_width % (2 ** 5) != 0 or FLAGS.img_height % (2 ** 5) != 0:
    logging.warn('Image size is not divisible by 2^5. For the architecture '
                 'employed, this could cause artefacts caused by resizing in '
                 'lower dimensions.')
  if FLAGS.icp_weight > 0.0:
    # TODO(casser): Change ICP interface to take matrix instead of vector.
    raise ValueError('ICP is currently not supported.')

  if not gfile.Exists(FLAGS.checkpoint_dir):
    gfile.MakeDirs(FLAGS.checkpoint_dir)

  train_model = model.Model(data_dir=FLAGS.data_dir,
                            file_extension=FLAGS.file_extension,
                            is_training=True,
                            learning_rate=FLAGS.learning_rate,
                            beta1=FLAGS.beta1,
                            reconstr_weight=FLAGS.reconstr_weight,
                            smooth_weight=FLAGS.smooth_weight,
                            ssim_weight=FLAGS.ssim_weight,
                            icp_weight=FLAGS.icp_weight,
                            batch_size=FLAGS.batch_size,
                            img_height=FLAGS.img_height,
                            img_width=FLAGS.img_width,
                            seq_length=FLAGS.seq_length,
                            architecture=FLAGS.architecture,
                            imagenet_norm=FLAGS.imagenet_norm,
                            weight_reg=FLAGS.weight_reg,
                            exhaustive_mode=FLAGS.exhaustive_mode,
                            random_scale_crop=FLAGS.random_scale_crop,
                            flipping_mode=FLAGS.flipping_mode,
                            depth_upsampling=FLAGS.depth_upsampling,
                            depth_normalization=FLAGS.depth_normalization,
                            compute_minimum_loss=FLAGS.compute_minimum_loss,
                            use_skip=FLAGS.use_skip,
                            joint_encoder=False,
                            handle_motion=FLAGS.handle_motion,
                            equal_weighting=FLAGS.equal_weighting,
                            size_constraint_weight=FLAGS.size_constraint_weight)

  train(train_model, FLAGS.pretrained_ckpt, FLAGS.imagenet_ckpt,
        FLAGS.checkpoint_dir, FLAGS.train_steps, FLAGS.summary_freq)
Example no. 10
import data
import loss
import torch
import model
from trainer import Trainer

from option import args
import util.utility as utility

ckpt = utility.checkpoint(args)

loader = data.Data(args)
model = model.Model(args, ckpt)
loss = loss.Loss(args, ckpt) if not args.test_only else None
trainer = Trainer(args, model, loss, loader, ckpt)

n = 0
while not trainer.terminate():
    n += 1
    trainer.train()
    if args.test_every != 0 and n % args.test_every == 0:
        trainer.test()
Example no. 11
def main(config):
    # SET DEVICE
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
        str(gpu) for gpu in config["COMMON"]["GPUS"])
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    DATE = datetime.datetime.now().strftime("%Y_%m_%d/%H_%M_%S")

    SAVEPATH = os.path.join(config["COMMON"]["SAVEPATH"], DATE)
    config["COMMON"]["SAVEPATH"] = SAVEPATH
    os.makedirs(SAVEPATH)
    utils.set_logger(os.path.join(SAVEPATH, "train.log"))
    utils.write_yaml(os.path.join(SAVEPATH, "config.yaml"), config)

    # DATA LOADING
    logging.info(f'Loading {config["DATA"]["NAME"]} datasets')
    transform = [
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15)
    ]
    loader = trainer.Dataloader(config["DATA"])
    # check configuration & real
    num_classes = len(loader["train"].dataset.classes)
    assert num_classes == config["MODEL"]["NUMCLASSES"], (
        f'Number of classes does not match!\n'
        f'In Directory: {num_classes}\n'
        f'In Configuration: {config["MODEL"]["NUMCLASSES"]}')

    # PREPROCESSING
    # Add New class
    # Add New data

    # MODEL BUILD
    logging.info(f"Building model")
    net = model.Model(config["MODEL"]["BASEMODEL"],
                      config["MODEL"]["NUMCLASSES"],
                      config["MODEL"]["FREEZE"]).to(device)
    # net = model.Model(num_classes=config["MODEL"]["NUMCLASSES"]).to(device)

    if torch.cuda.is_available() and len(config["COMMON"]["GPUS"]) > 1:
        logging.info(f"Multi GPU mode")
        net = torch.nn.DataParallel(
            net, device_ids=config["COMMON"]["GPUS"]).to(device)

    criterion = model.loss_fn
    metrics = {"acc": model.accuracy}  # If classification
    # metrics = {}
    optm = optm_dict[config["TRAIN"]["OPTIMIZER"]](
        net.parameters(), lr=config["TRAIN"]["LEARNINGRATE"])

    # TRAINING
    EPOCHS = config["TRAIN"]["EPOCHS"]
    logging.info(f"Training start !")
    best_val_loss = np.inf
    for epoch in range(EPOCHS):

        metrics_summary = trainer.train(epoch, net, optm, criterion,
                                        loader["train"], metrics, device,
                                        config)
        metrics_summary.update(
            trainer.eval(epoch, net, optm, criterion, loader["validation"],
                         metrics, device, config))

        metrics_string = " ; ".join(f"{key}: {value:05.3f}"
                                    for key, value in metrics_summary.items())
        logging.info(f"[{epoch+1}/{EPOCHS}] Performance: {metrics_string}")

        is_best = metrics_summary['val_loss'] <= best_val_loss

        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': net.state_dict(),
                'optim_dict': optm.state_dict()
            },
            is_best=is_best,
            checkpoint=SAVEPATH)

        if is_best:
            logging.info("Found new best loss !")
            best_val_loss = metrics_summary['val_loss']

            best_json_path = os.path.join(SAVEPATH, "metrics_best.json")
            utils.save_dict_to_json(metrics_summary, best_json_path, is_best)

        last_json_path = os.path.join(SAVEPATH, "metrics_history.json")
        utils.save_dict_to_json(metrics_summary, last_json_path)

    # Data version control

    logging.info(f"Training done !")
Example no. 12
def _run_inference():
    """Runs all images through depth model and saves depth maps."""
    ckpt_basename = os.path.basename(FLAGS.model_ckpt)
    ckpt_modelname = os.path.basename(os.path.dirname(FLAGS.model_ckpt))
    og_dir = FLAGS.output_dir
    output_dir = os.path.join(
        FLAGS.output_dir,
        FLAGS.kitti_video.replace('/', '_') + '_' + ckpt_modelname + '_' +
        ckpt_basename)
    if not gfile.Exists(output_dir):
        gfile.MakeDirs(output_dir)
    inference_model = model.Model(is_training=False,
                                  seq_length=FLAGS.seq_length,
                                  batch_size=FLAGS.batch_size,
                                  img_height=FLAGS.img_height,
                                  img_width=FLAGS.img_width)
    vars_to_restore = util.get_vars_to_restore(FLAGS.model_ckpt)
    saver = tf.train.Saver(vars_to_restore)
    sv = tf.train.Supervisor(logdir='/tmp/', saver=None)
    with sv.managed_session() as sess:
        saver.restore(sess, FLAGS.model_ckpt)
        if FLAGS.kitti_video == 'test_files_eigen':
            im_files = util.read_text_lines(
                util.get_resource_path('dataset/kitti/test_files_eigen.txt'))
            im_files = [os.path.join(FLAGS.kitti_dir, f) for f in im_files]
        else:
            video_path = os.path.join(FLAGS.kitti_dir, FLAGS.kitti_video)
            #edit
            #im_files = gfile.Glob(os.path.join(video_path, 'image_02/data', '*.png'))
            im_files = gfile.Glob(os.path.join(video_path, '*.png'))  #delete
            #end edit
            im_files = [f for f in im_files if 'disp' not in f]
            im_files = sorted(im_files)
        # regularPictures(im_files, output_dir, inference_model)
        for i in range(0, len(im_files), FLAGS.batch_size):
            if i % 100 == 0:
                logging.info('Generating from %s: %d/%d', ckpt_basename, i,
                             len(im_files))
            inputs = np.zeros(
                (FLAGS.batch_size, FLAGS.img_height, FLAGS.img_width, 3),
                dtype=np.uint8)
            for b in range(FLAGS.batch_size):
                idx = i + b
                if idx >= len(im_files):
                    break
                im = scipy.misc.imread(im_files[idx],
                                       mode='RGB')  #added 2nd arg
                inputs[b] = scipy.misc.imresize(
                    im, (FLAGS.img_height, FLAGS.img_width))
            results = inference_model.inference(inputs, sess, mode='depth')
            for b in range(FLAGS.batch_size):
                idx = i + b
                if idx >= len(im_files):
                    break
                if FLAGS.kitti_video == 'test_files_eigen':
                    depth_path = os.path.join(output_dir, '%03d.png' % idx)
                else:
                    depth_path = os.path.join(output_dir, '%04d.png' % idx)
                depth_map = results['depth'][b]
                depth_map = np.squeeze(depth_map)
                colored_map = _normalize_depth_for_display(depth_map,
                                                           cmap=CMAP)
                input_float = inputs[b].astype(np.float32) / 255.0
                vertical_stack = np.concatenate((input_float, colored_map),
                                                axis=0)
                scipy.misc.imsave(depth_path, vertical_stack)
                #edit
                if FLAGS.kitti_video == 'test_files_eigen':
                    outPath = os.path.join(og_dir, 'reg_pics',
                                           '%03d.png' % idx)
                else:
                    outPath = os.path.join(og_dir, 'reg_pics',
                                           '%04d.png' % idx)
                tempImg = scipy.misc.imread(im_files[0], mode='RGB')
                resized_colored_map = scipy.misc.imresize(
                    colored_map, (len(tempImg), len(tempImg[0])))
                scipy.misc.imsave(outPath, resized_colored_map)
Example no. 13
sys.path.append('./method')
import os
import numpy as np
savefig = True
if savefig:
    import matplotlib
    matplotlib.use('Agg')
import matplotlib.pyplot as plt

import model as m

savedir = './fig'
if not os.path.isdir(savedir):
    os.makedirs(savedir)

model_tnnp = m.Model('./mmt-model-files/tnnp-2004.mmt')
model_tnnp.set_name('tnnp-2004')
model_tnnpw = m.Model('./mmt-model-files/tnnp-2004-w.mmt')
model_tnnpw.set_name('tnnp-w-2004')
model_fink = m.Model('./mmt-model-files/fink-2008.mmt')
model_fink.set_name('fink-2008')

# Default stimuli
times = np.linspace(0, 1000, 5000)

sim_tnnp = model_tnnp.simulate(np.ones(model_tnnp.n_parameters()), times)
sim_tnnpw = model_tnnpw.simulate(np.ones(model_tnnpw.n_parameters()), times)
sim_fink = model_fink.simulate(np.ones(model_fink.n_parameters()), times)

for _ in range(10):
    assert (np.all(
Example no. 14
    params['ModelParams'][
        'testInterval'] = 2000  # the number of training iterations between testing
    params['ModelParams']['device_ids'] = [
        0, 1
    ]  # the ids of the GPUs used for multi-GPU training

    # params of the DataManager
    params['DataManagerParams']['VolSize'] = np.asarray(
        [64, 64, 24], dtype=int)  # the size of the crop image
    params['DataManagerParams']['TestStride'] = np.asarray(
        [64, 64, 24], dtype=int
    )  # the stride between adjacent crops in the testing and validation phases

    # True: produce the probability map in the testing phase, False: produce the label image
    params['TestParams']['ProbabilityMap'] = False

    model = model.Model(params)
    train = [i for i, j in enumerate(sys.argv) if j == '-train']
    if len(train) > 0:
        model.train()  #train model

    test = [i for i, j in enumerate(sys.argv) if j == '-test']
    for i in sys.argv:
        if (i.isdigit()):
            snapnumber = i
            break
    if len(test) > 0:
        model.test(
            snapnumber
        )  # test model, the snapnumber is the number of the model snapshot
Example no. 15
batch_size_per_worker = int(args.batch_size_per_worker)
resiliency = float(args.resiliency)
num_peers = int(args.num_peers)
alpha = float(args.compression_alpha)
compression = int(args.compression)
r = float(args.compression_rate)
if compression == 0:
    r = 1
server_Address = args.server_address.encode()

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 28, 28, 1)) / 255.
x_test = np.reshape(x_test, (-1, 28, 28, 1)) / 255.
model = model.Model(x_train, y_train)
# server_Address = b"localhost:8080"
params_count = 199658
samples = int(params_count / r)
log_degree = 13
log_scale = 40
input_len = model.flat_gradient_shape[0]
if rank == 0:
    print("----------------------------------------")
    print("Master at: ", server_Address)
    print("Number of clients: ", num_peers)
    print("Robustness against dropouts: ", robust, " with resiliency: ", resiliency)
    print("Learining with rate: ", learning_rate, " for ", iterations, " iterations and batch size of ",
          batch_size_per_worker)
    print("------------------------------------------")
# print(input_len)
Example no. 16
def main():

    # cleanup input dir
    ret = input('Are you sure you want to clean %s [yes|no] ' % (WORK_DIR, ))
    #     ret = 'yes'
    if ret == 'yes':
        for f in glob.glob(os.path.join(WORK_DIR, '*')):
            if not f.endswith('.txt'):
                os.remove(f)
                print(f + ' deleted')

    config = namedtuple('TrainConfig',
                        train_config.keys())(*train_config.values())
    model_config = namedtuple('ModelConfig',
                              nn_config.keys())(*nn_config.values())

    with open(os.path.join(WORK_DIR, 'config.json'), 'w') as fh:
        json.dump(nn_config, fh)

    proc = reader.TextProcessor.from_file(os.path.join(WORK_DIR, 'input.txt'))
    proc.create_vocab(model_config.vocab_size)
    train_data = proc.get_vector()
    np.save(os.path.join(WORK_DIR, 'vocab.npy'), np.array(proc.id2word))
    proc.save_converted(os.path.join(WORK_DIR, 'input.conv.txt'))

    perplexity_graph = []
    iter_graph = []

    with tf.Graph().as_default(), tf.Session() as session:
        #         logwriter = tf.summary.FileWriter(WORK_DIR, graph=tf.get_default_graph())
        initializer = tf.random_uniform_initializer(-model_config.init_scale,
                                                    model_config.init_scale)
        with tf.variable_scope('model', reuse=None, initializer=initializer):
            m = model.Model(is_training=True, config=model_config)

        tf.global_variables_initializer().run()
        saver = tf.train.Saver(tf.global_variables())

        for i in range(config.max_max_epoch):
            lr_decay = config.lr_decay**max(i - config.max_epoch, 0.0)
            m.assign_lr(session, config.learning_rate * lr_decay)

            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
            train_perplexity = run_epoch(session,
                                         m,
                                         train_data,
                                         m.train_op,
                                         verbose=True)
            print("Epoch: %d Train Perplexity: %.3f" %
                  (i + 1, train_perplexity))
            perplexity_graph.append(train_perplexity)
            iter_graph.append(i)

            ckp_path = os.path.join(WORK_DIR, 'model.ckpt')
            saver.save(session, ckp_path, global_step=i)

    plt.plot(iter_graph, perplexity_graph)
    plt.xlabel('iterations')
    plt.ylabel('perplexity')
    #     plt.show()
    plt.savefig(os.path.join(WORK_DIR, 'learning_curve.png'))
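As a quick check of the decay schedule above: with hypothetical values learning_rate=1.0, lr_decay=0.5 and max_epoch=4, epochs 0 through 4 train at 1.0, while epoch i=6 trains at 1.0 * 0.5**max(6 - 4, 0) = 0.25.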
Example no. 17
from computer import Computer
import menu
from time import time, sleep
import globals

if __name__ == "__main__":

    logging.basicConfig(filename='rats.log', level=logging.INFO)

    if config.enable_menu:
        menu.show_menu()

    pygame.init()
    player = Player()
    generator = Generator(player)
    game = model.Model(generator)
    view = render.View(game.map.size)
    measure_time_period = 20
    measure_time_phase = 0
    speed = config.initial_speed
    running = True
    while running:
        if measure_time_phase == 0:
            total_time = 0.0
            game_time = 0.0
        measure_time_phase = (measure_time_phase + 1) % measure_time_period
        start_time = time()
        game.step()
        view.render(game)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
Example no. 18
import model

input = ["R : ohio = .5", "R : phil = .5", "W : /nek/ = .6", "W : /naek/ = .4",
"O ohio /nek/ : [nek] = 1", "O phil /nek/ : [nek] = .667", "O phil /nek/ : [naek] = .333",
"O ohio /naek/ : [naek] = 1", "O phil /naek/ : [naek] = 1"]

R = model.Model("R")
W = model.CondModel('W')
O = model.CondModel("O")

OgivR = model.CondModel('OR')

for line in input:
	R.read(line)
	W.read(line)
	O.read(line)
	
OgivNone = model.Model("Onone")

Idata = "I [naek] [nek] [naek]"
IdataSplit = Idata.split()
	
#w values aren't given for each time step, so condition out
for r, w in O:
	for o in O[r,w]:
		OgivR[r][o] += O[r,w][o] / 2
		OgivNone[o] += O[r,w][o] / 4

#for o in OgivNone:
	#print(o, OgivNone[o])
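The divisions by 2 and 4 above marginalize out the unobserved conditions with uniform weights (two choices of w, four (r, w) pairs). For instance, from the figures in input: OgivR['phil']['[nek]'] = .667 / 2 = .3335, since '[nek]' never occurs with ('phil', '/naek/').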
Example no. 19
 def _GetNamespace(self, fake_content, filename):
     """Returns a namespace object for the given content"""
     api_def = idl_schema.Process(fake_content, filename)
     m = model.Model()
     return m.AddNamespace(api_def[0], filename)
Example no. 20
def sampling_conditional(sketch_data_dir, photo_data_dir, sampling_base_dir,
                         model_base_dir):
    [train_set, valid_set, test_set, hps_model, eval_hps_model, sample_hps_model] = \
        load_env_compatible(sketch_data_dir, photo_data_dir, model_base_dir)
    model_dir = os.path.join(model_base_dir, sample_hps_model.data_type)

    # construct the sketch-rnn model here:
    reset_graph()
    model = sketch_p2s_model.Model(hps_model)
    eval_model = sketch_p2s_model.Model(eval_hps_model, reuse=True)
    sampling_model = sketch_p2s_model.Model(sample_hps_model, reuse=True)

    tfconfig = tf.ConfigProto()
    tfconfig.gpu_options.allow_growth = True
    sess = tf.InteractiveSession(config=tfconfig)
    sess.run(tf.global_variables_initializer())

    # loads the weights from checkpoint into our model
    load_checkpoint(sess, model_dir)

    for _ in range(20):
        rand_idx = random.randint(0, test_set.num_batches - 1)
        orig_x, unused_point_x, unused_point_l, img_x, img_paths = test_set.get_batch(
            rand_idx)

        img_path = img_paths[0]
        img_name = img_path[img_path.rfind('/') + 1:-4]
        sub_sampling_dir = os.path.join(sampling_base_dir,
                                        sample_hps_model.data_type, img_name)
        os.makedirs(sub_sampling_dir, exist_ok=True)
        print('rand_idx', rand_idx, 'stroke.shape', orig_x[0].shape, img_paths)

        ori_img = img_x[0].astype(np.uint8)
        ori_img_png = Image.fromarray(ori_img, 'RGB')
        ori_img_png.save(os.path.join(sub_sampling_dir, 'photo_gt.png'), 'PNG')
        draw_strokes(orig_x[0], os.path.join(sub_sampling_dir,
                                             'sketch_gt.svg'))

        # encode the image
        common_pix_h = sess.run(sampling_model.pix_h,
                                feed_dict={sampling_model.input_photo: img_x})

        # decoding for sampling
        strokes_out = sample(sess,
                             sampling_model,
                             common_pix_h,
                             eval_model.hps.max_seq_len,
                             temperature=0.1)  # in stroke-3 format
        draw_strokes(strokes_out,
                     os.path.join(sub_sampling_dir, 'sketch_pred.svg'))

        # Create generated grid at various temperatures from 0.1 to 1.0
        stroke_list = []
        for i in range(10):
            for j in range(1):
                print(i, j)
                stroke_list.append([
                    sample(sess,
                           sampling_model,
                           common_pix_h,
                           eval_model.hps.max_seq_len,
                           temperature=0.1), [j, i]
                ])
        stroke_grid = make_grid_svg(stroke_list)
        draw_strokes(stroke_grid,
                     os.path.join(sub_sampling_dir, 'sketch_pred_multi.svg'))
Example no. 21
                msg = 'Empty mesh, CGX will not start!'
                logging.warning(msg)
                return

            # gui.cgx.kill(w)
            has_nodes = len(self.m.Mesh.nodes)
            gui.cgx.open_inp(self.w, self.j.inp, has_nodes)


# Test importer on all CalculiX examples
if __name__ == '__main__':
    clean.screen()
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    start_time = time.perf_counter()
    print = tests.print
    m = model.Model() # generate FEM model

    # Prepare logging
    log_file = __file__[:-3] + '.log'
    h = tests.myHandler(log_file) # remove old log file
    log_capture_string = io.StringIO()
    ch = logging.StreamHandler(log_capture_string)
    ch.setLevel(logging.DEBUG)
    fmt = logging.Formatter('%(levelname)s: %(message)s')
    ch.setFormatter(fmt)
    logging.getLogger().addHandler(ch)

    limit = 50000 # how many files to process
    # examples_dir = '../../examples/ccx/test'
    # examples_dir = '../../examples/abaqus/eif'
    # examples_dir = '../../examples/yahoo'
Example no. 22
def main():
    convert()
    m = model.Model([300, 300], [100, 50], dropout=0.5)
    pcs = multi_training.loadPieces("music")
    multi_training.trainPiece(m, pcs, 10000)
    gen_adaptive(m, pcs, 10, name="composition")
Example no. 23
 def __init__(self):
     self.model = model.Model()  # create the model
     self.view = view.View()  # create the view
     #self.view.connect_contar_clicked(self.on_contar_clicked)
     self.view.connect_interval_changed(
         self.on_combo_changed)  # check whether the combobox option changed
Example no. 24
        chs.append(vocab.get(toks[0], special_words.UNK_ID))
        ids.append(tag_dict[toks[1]])


def gen():
    for chs, ids in zip(all_inp, all_tar):
        yield (chs, ids)


dataset = tf.data.Dataset.from_generator(gen, (tf.int64, tf.int64))
dataset = dataset.padded_batch(args.batch_size, padded_shapes=([-1], [-1]))
# for item in dataset.take(3):
#   print(item[0], item[1])

model_config = configuration.ModelConfig()
tagger = model.Model(None, len(vocab), model_config)
ckpt = tf.train.Checkpoint(tagger=tagger)
ckpt.restore(tf.train.latest_checkpoint(args.ckpt_dir))

accuracy = tf.keras.metrics.Accuracy(name='accuracy')
metric = metrics.TaggerMetric(model_config.n_tags)


def test_step(inp, tar):
    # inp.shape == (batch_size, max_seq_len)
    # tar.shape == (batch_size, max_seq_len)
    padding_mask = data_utils.create_padding_mask(inp)

    pred, potentials = tagger(inp, False, padding_mask)
    accuracy(tar, pred, padding_mask)
    metric(tar, pred, padding_mask)
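A note on the helper assumed above: data_utils.create_padding_mask is not shown, so here is only a plausible sketch of a mask that flags real (non-padded) positions and can serve as a per-position weight in the metrics; the project's actual helper may differ:

import tensorflow as tf

def create_padding_mask(seq):
    # 1.0 for real tokens, 0.0 for zero-padding (token id 0 assumed to be the pad id).
    return tf.cast(tf.math.not_equal(seq, 0), tf.float32)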
Example no. 25
 def _GetNamespace(self, fake_content, filename, is_idl):
   """Returns a namespace object for the given content"""
   api_def = (idl_schema.Process(fake_content, filename) if is_idl
       else json_parse.Parse(fake_content))
   m = model.Model()
   return m.AddNamespace(api_def[0], filename)
Example no. 26
def _run_inference(output_dir=None,
                   file_extension='png',
                   depth=True,
                   egomotion=False,
                   model_ckpt=None,
                   input_dir=None,
                   input_list_file=None,
                   batch_size=1,
                   img_height=128,
                   img_width=416,
                   seq_length=3,
                   architecture=nets.RESNET,
                   imagenet_norm=True,
                   use_skip=True,
                   joint_encoder=True,
                   shuffle=False,
                   flip_for_depth=False,
                   inference_mode=INFERENCE_MODE_SINGLE,
                   inference_crop=INFERENCE_CROP_NONE,
                   use_masks=False):
    """Runs inference. Refer to flags in inference.py for details."""
    inference_model = model.Model(is_training=False,
                                  batch_size=batch_size,
                                  img_height=img_height,
                                  img_width=img_width,
                                  seq_length=seq_length,
                                  architecture=architecture,
                                  imagenet_norm=imagenet_norm,
                                  use_skip=use_skip,
                                  joint_encoder=joint_encoder)
    check_dir([output_dir + "/Depth/"])
    vars_to_restore = util.get_vars_to_save_and_restore(model_ckpt)
    saver = tf.train.Saver(vars_to_restore)
    sv = tf.train.Supervisor(logdir='/tmp/', saver=None)
    with sv.managed_session() as sess:
        saver.restore(sess, model_ckpt)
        if not gfile.Exists(output_dir):
            gfile.MakeDirs(output_dir)
        logging.info('Predictions will be saved in %s.', output_dir)

        # Collect all images to run inference on.
        im_files, basepath_in = collect_input_images(input_dir,
                                                     input_list_file,
                                                     file_extension)
        if shuffle:
            logging.info('Shuffling data...')
            np.random.shuffle(im_files)
        logging.info('Running inference on %d files.', len(im_files))

        # Create missing output folders and pre-compute target directories.
        output_dirs = create_output_dirs(im_files, basepath_in, output_dir)

        # Run depth prediction network.
        if depth:
            im_batch = []
            for i in range(len(im_files)):
                if i % 100 == 0:
                    logging.info('%s of %s files processed.', i, len(im_files))

                # Read image and run inference.
                if inference_mode == INFERENCE_MODE_SINGLE:
                    if inference_crop == INFERENCE_CROP_NONE:
                        im = util.load_image(im_files[i],
                                             resize=(img_width, img_height))
                    elif inference_crop == INFERENCE_CROP_CITYSCAPES:
                        im = util.crop_cityscapes(util.load_image(im_files[i]),
                                                  resize=(img_width,
                                                          img_height))
                elif inference_mode == INFERENCE_MODE_TRIPLETS:
                    im = util.load_image(im_files[i],
                                         resize=(img_width * 3, img_height))
                    im = im[:, img_width:img_width * 2]
                if flip_for_depth:
                    im = np.flip(im, axis=1)
                im_batch.append(im)

                if len(im_batch) == batch_size or i == len(im_files) - 1:
                    # Call inference on batch.
                    for _ in range(batch_size - len(im_batch)):  # Fill up batch.
                        im_batch.append(
                            np.zeros(shape=(img_height, img_width, 3),
                                     dtype=np.float32))
                    im_batch = np.stack(im_batch, axis=0)
                    est_depth = inference_model.inference_depth(im_batch, sess)
                    if flip_for_depth:
                        est_depth = np.flip(est_depth, axis=2)
                        im_batch = np.flip(im_batch, axis=2)

                    for j in range(len(im_batch)):
                        color_map = util.normalize_depth_for_display(
                            np.squeeze(est_depth[j]))
                        visualization = np.concatenate(
                            (im_batch[j], color_map), axis=0)

                        # Save raw prediction and color visualization. Extract filename
                        # without extension from full path: e.g. path/to/input_dir/folder1/
                        # file1.png -> file1
                        k = i - len(im_batch) + 1 + j
                        filename_root = os.path.splitext(
                            os.path.basename(im_files[k]))[0]
                        pref = '_flip' if flip_for_depth else ''
                        #output_raw = os.path.join(
                        #    output_dirs[k], "Depth/" + filename_root + pref + '.npy')
                        print("filename_root = ", filename_root)
                        output_vis = os.path.join(
                            output_dirs[k],
                            "Depth/" + filename_root + pref + '.png')
                        #with gfile.Open(output_raw, 'wb') as f:
                        #  np.save(f, est_depth[j])
                        # util.save_image(output_vis, visualization, file_extension)
                        dim = (1242, 375)
                        color_map = cv2.resize(color_map,
                                               dim,
                                               interpolation=cv2.INTER_AREA)
                        util.save_image(output_vis, color_map, file_extension)
                    im_batch = []

        # Run egomotion network.
        if egomotion:
            if inference_mode == INFERENCE_MODE_SINGLE:
                # Run regular egomotion inference loop.
                input_image_seq = []
                input_seg_seq = []
                current_sequence_dir = None
                current_output_handle = None
                for i in range(len(im_files)):
                    sequence_dir = os.path.dirname(im_files[i])
                    if sequence_dir != current_sequence_dir:
                        # Assume start of a new sequence, since this image lies in a
                        # different directory than the previous ones.
                        # Clear egomotion input buffer.
                        output_filepath = os.path.join(output_dirs[i],
                                                       'egomotion.txt')
                        if current_output_handle is not None:
                            current_output_handle.close()
                        current_sequence_dir = sequence_dir
                        logging.info('Writing egomotion sequence to %s.',
                                     output_filepath)
                        current_output_handle = gfile.Open(
                            output_filepath, 'w')
                        input_image_seq = []
                    im = util.load_image(im_files[i],
                                         resize=(img_width, img_height))
                    input_image_seq.append(im)
                    if use_masks:
                        im_seg_path = im_files[i].replace(
                            '.%s' % file_extension, '-seg.%s' % file_extension)
                        if not gfile.Exists(im_seg_path):
                            raise ValueError(
                                'No segmentation mask %s has been found for '
                                'image %s. If none are available, disable '
                                'use_masks.' % (im_seg_path, im_files[i]))
                        input_seg_seq.append(
                            util.load_image(im_seg_path,
                                            resize=(img_width, img_height),
                                            interpolation='nn'))

                    if len(input_image_seq) < seq_length:  # Buffer not filled yet.
                        continue
                    if len(input_image_seq) > seq_length:  # Remove oldest entry.
                        del input_image_seq[0]
                        if use_masks:
                            del input_seg_seq[0]

                    input_image_stack = np.concatenate(input_image_seq, axis=2)
                    input_image_stack = np.expand_dims(input_image_stack,
                                                       axis=0)
                    if use_masks:
                        input_image_stack = mask_image_stack(
                            input_image_stack, input_seg_seq)
                    est_egomotion = np.squeeze(
                        inference_model.inference_egomotion(
                            input_image_stack, sess))
                    egomotion_str = []
                    for j in range(seq_length - 1):
                        egomotion_str.append(','.join(
                            [str(d) for d in est_egomotion[j]]))
                    current_output_handle.write(
                        str(i) + ' ' + ' '.join(egomotion_str) + '\n')
                if current_output_handle is not None:
                    current_output_handle.close()
            elif inference_mode == INFERENCE_MODE_TRIPLETS:
                written_before = []
                for i in range(len(im_files)):
                    im = util.load_image(im_files[i],
                                         resize=(img_width * 3, img_height))
                    input_image_stack = np.concatenate([
                        im[:, :img_width], im[:, img_width:img_width * 2],
                        im[:, img_width * 2:]
                    ],
                                                       axis=2)
                    input_image_stack = np.expand_dims(input_image_stack,
                                                       axis=0)
                    if use_masks:
                        im_seg_path = im_files[i].replace(
                            '.%s' % file_extension, '-seg.%s' % file_extension)
                        if not gfile.Exists(im_seg_path):
                            raise ValueError(
                                'No segmentation mask %s has been found for '
                                'image %s. If none are available, disable '
                                'use_masks.' % (im_seg_path, im_files[i]))
                        seg = util.load_image(im_seg_path,
                                              resize=(img_width * 3,
                                                      img_height),
                                              interpolation='nn')
                        input_seg_seq = [
                            seg[:, :img_width],
                            seg[:, img_width:img_width * 2],
                            seg[:, img_width * 2:]
                        ]
                        input_image_stack = mask_image_stack(
                            input_image_stack, input_seg_seq)
                    est_egomotion = inference_model.inference_egomotion(
                        input_image_stack, sess)
                    est_egomotion = np.squeeze(est_egomotion)
                    egomotion_1_2 = ','.join(
                        [str(d) for d in est_egomotion[0]])
                    egomotion_2_3 = ','.join(
                        [str(d) for d in est_egomotion[1]])

                    output_filepath = os.path.join(output_dirs[i],
                                                   'egomotion.txt')
                    file_mode = 'w' if output_filepath not in written_before else 'a'
                    with gfile.Open(output_filepath,
                                    file_mode) as current_output_handle:
                        current_output_handle.write(
                            str(i) + ' ' + egomotion_1_2 + ' ' +
                            egomotion_2_3 + '\n')
                    written_before.append(output_filepath)
            logging.info('Done.')
 def loadProject(self, prj):
     if not prj: return
     self.currentActivePrj = prj
     self.project.currentActiveProject = prj
     self.tab.Unbind(
         wx.aui.EVT_AUINOTEBOOK_PAGE_CHANGED
     )  # unbind so we don't catch the page-delete event; we only want the page-selection event
     if not self.model:
         self.cleanupTabs()
         self.model = t = namedtuple(
             'model', 'model uml instance modelDetails instanceDetails')
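         # namedtuple() returns a class here; it is used as a simple record whose fields are filled in as attributes below.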
         t.model = model.Model()
         prj.setSAFplusModel(t.model)
         modelFile = os.path.join(prj.directory(),
                                  prj.model.children()[0].strip())
         t.model.load(modelFile)
         t.uml = umlEditor.Panel(self.tab, self.guiPlaces, t.model)
         self.tab.InsertPage(0, t.uml, self.getCurrentPageText(0))
         t.modelDetails = entityDetailsDialog.Panel(self.tab,
                                                    self.guiPlaces,
                                                    t.model,
                                                    isDetailInstance=False)
         self.tab.InsertPage(1, t.modelDetails, self.getCurrentPageText(1))
         t.instance = instanceEditor.Panel(self.tab, self.guiPlaces,
                                           t.model)
         self.tab.InsertPage(2, t.instance, self.getCurrentPageText(2))
         t.instanceDetails = entityDetailsDialog.Panel(
             self.tab, self.guiPlaces, t.model, isDetailInstance=True)
         self.tab.InsertPage(3, t.instanceDetails,
                             self.getCurrentPageText(3))
     else:
         print 'OnProjectLoaded: model is not None'
         self.cleanupTools()
         self.cleanupMenus()
         t = self.model
         prj.setSAFplusModel(t.model)
         modelFile = os.path.join(prj.directory(),
                                  prj.model.children()[0].strip())
         t.model.init()
         t.model.load(modelFile)
         if t.uml:
             t.uml.setModelData(t.model)
             t.uml.deleteTools()
             t.uml.addTools()
             t.uml.refresh()
         else:
             t.uml = umlEditor.Panel(self.tab, self.guiPlaces, t.model)
             self.tab.InsertPage(0,
                                 t.uml,
                                 self.getCurrentPageText(0),
                                 select=True)
         if t.instance:
             t.instance.setModelData(t.model)
             t.instance.refresh()
             t.instance.addTools()
         else:
             t.instance = instanceEditor.Panel(self.tab, self.guiPlaces,
                                               t.model)
             self.tab.InsertPage(2, t.instance, self.getCurrentPageText(2))
         if t.instanceDetails:
             t.instanceDetails.setModelData(t.model)
             t.instanceDetails.refresh()
         else:
             t.instanceDetails = entityDetailsDialog.Panel(
                 self.tab, self.guiPlaces, t.model, isDetailInstance=True)
             self.tab.InsertPage(3, t.instanceDetails,
                                 self.getCurrentPageText(3))
         if t.modelDetails:
             t.modelDetails.refresh()
         else:
             t.modelDetails = entityDetailsDialog.Panel(
                 self.tab, self.guiPlaces, t.model, isDetailInstance=False)
             self.tab.InsertPage(1, t.modelDetails,
                                 self.getCurrentPageText(1))
         self.setPagesText()
     self.tab.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CHANGED,
                   self.onPageChanged)  # bind to catch page selection event
     self.tab.SetSelection(0)  # open uml model view by default
     # append to recent projects repository and update the menu
     self.updateRecentProject(prj)
     if self.currentActivePrj.dataModelPlugin:
         self.currentActivePrj.dataModelPlugin.init(t, self.guiPlaces)
Esempio n. 28
0
                outdim = vals[-1]
                layer_dims = vals[1:-1]
                continue
            else:
                vals = line.split(' ')
                vals = [num(x) for x in vals]
                indat.append(vals[:indim])
                targets.append(vals[indim:])

    indat = np.array(indat)
    targets = np.array(targets)

    # print "Input Data : %s \nOutput Data : %s\n" % (indat, targets)

    # build the model
    mm = model.Model(indim)
    last_dim = indim
    for ldim in layer_dims:
        mm.push_layer(layer.Dense(ldim, last_dim))
        last_dim = ldim
    mm.push_layer(layer.Dense(outdim, last_dim, activation="none"))
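    # The output layer uses no activation, i.e. it is linear, presumably so the network can produce unbounded regression targets.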

    print str(mm)

    print "Beginning training session\n"

    mm.train(indat, targets, 1, epochs)

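    # Read whitespace-separated feature vectors from stdin and print the model's prediction for each line.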
    for line in sys.stdin:
        inp = [float(x) for x in line.strip().split(' ')]
        print mm.predict(np.array([inp]))
Esempio n. 29
0
### Prepare training data  ###################

# Fetch the training data
train = dl.getTrainValues()
train_ = train[((34 - WINDOW_SIZE + 1) <= train.date_block_num)
               & (train.date_block_num <= 33)].reset_index(drop=True)
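# Keep only the last WINDOW_SIZE month blocks (up to block 33) for training; block 34 is held out below as the test period.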
train_y = train_['item_cnt_month']
train_x = train_.drop(columns=['date_block_num', 'item_cnt_month'])

#log.info(train_y.head())
log.info(train_y.count())
#log.info(train_x.head())
log.info(train_x.count())

model = model.Model()
model.fit(train_x.values, train_y.values)

log.info('feature_importances')
log.info(model.get_feature_importances(train_x))

pred = model.predict(train_x)
score = model.predictScore(train_y.values, pred)
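# Note: this score is computed on the training data itself.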

log.info('predictScore')
log.info(score)

# Apply the model to the test data
test = dl.getTestValues()

test_ = train[(train.date_block_num == 34)].reset_index(drop=True)
Esempio n. 30
0
 def init_model(self):
     self.model = model.Model()