Example #1
 def test_colorize(self):
     self.assertEqual(colorize(Colors.YELLOW, 'test'),
                      '\033[93mtest\033[0m')
     self.assertEqual(colorize(Colors.GRAY, 'test'), '\033[90mtest\033[0m')
     self.assertEqual(colorize(Colors.PURPLE, 'test'),
                      '\033[95mtest\033[0m')
     self.assertEqual(colorize(Colors.RED, 'test'), '\033[91mtest\033[0m')
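The assertions above pin down this variant's contract: colorize(color, text) wraps the text in the given ANSI escape sequence and appends a reset code. A minimal sketch consistent with those assertions (the Colors constants and the END attribute are assumptions inferred from the expected strings):

class Colors:
    GRAY = '\033[90m'
    RED = '\033[91m'
    YELLOW = '\033[93m'
    PURPLE = '\033[95m'
    END = '\033[0m'  # resets all terminal attributes

def colorize(color, text):
    # prefix the text with the color code and append the reset code
    return '{}{}{}'.format(color, text, Colors.END)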
Example #2
    def _spectrogramImageSummary(self):
        complexOutput = self._architecture.output()[0]
        outputSpectrogram = tf.sqrt(
            tf.reduce_sum(tf.square(complexOutput), axis=-1))

        complexTarget = self._architecture.target()[0]
        targetSpectrogram = tf.sqrt(
            tf.reduce_sum(tf.square(complexTarget), axis=-1))

        complexLeft = self._architecture.input()[0, :, :, 0:2]
        leftSpectrogram = tf.sqrt(
            tf.reduce_sum(tf.square(complexLeft), axis=-1))

        complexRight = self._architecture.input()[0, :, :, 2:4]
        rightSpectrogram = tf.sqrt(
            tf.reduce_sum(tf.square(complexRight), axis=-1))

        totalSpectrogram = tf.transpose(
            tf.concat([leftSpectrogram, outputSpectrogram, rightSpectrogram],
                      axis=0))

        return tf.summary.merge([
            tf.summary.image("Original",
                             [colorize(tf.transpose(targetSpectrogram))]),
            tf.summary.image("Generated",
                             [colorize(tf.transpose(outputSpectrogram))]),
            tf.summary.image("Complete", [colorize(totalSpectrogram)])
        ])
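In the TensorFlow examples in this collection, colorize receives a 2-D spectrogram tensor and its result is wrapped in tf.summary.image, so this variant evidently maps a single-channel tensor to an RGB image. A sketch of one common colormap-lookup approach, offered only as an assumption about how such a helper might work (the [min, max] normalization and 'viridis' colormap are not confirmed by the source):

import matplotlib.cm
import numpy as np
import tensorflow as tf

def colorize(value, vmin=None, vmax=None, cmap='viridis'):
    # normalize the tensor into [0, 1]
    vmin = tf.reduce_min(value) if vmin is None else vmin
    vmax = tf.reduce_max(value) if vmax is None else vmax
    value = (value - vmin) / (vmax - vmin)
    # quantize to 256 levels and gather RGB triples from the colormap table
    indices = tf.cast(tf.round(value * 255), tf.int32)
    table = tf.constant(matplotlib.cm.get_cmap(cmap)(np.arange(256))[:, :3],
                        dtype=tf.float32)
    return tf.gather(table, indices)  # shape (height, width, 3)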
Example #3
 def help(cls):
     return '    {}{}{}\n    {}'.format(
         cls.pattern,
         colorize(Colors.GRAY, ' - Example: ' if len(cls.example) == 1 else ' - Examples: '),
         colorize(Colors.GRAY, ' | ').join(cls.example),
         colorize(Colors.GREEN, cls.description)
     )
Example #4
 def one_line_help(cls):
     return '    {:<15} - {}{}{}\n'.format(
         cls.pattern,
         colorize(Colors.GREEN, cls.description),
         colorize(Colors.GRAY, ' Example: ' if len(cls.example) == 1 else ' Examples: '),
         colorize(Colors.GRAY, ' | ').join(cls.example)
     ) if cls.show_in_main_help else ''
Example #5
def test_registration_3d():
#if __name__ == "__main__":
    import rospy, itertools, glob
    from utils.colorize import colorize
    if rospy.get_name() == "/unnamed": rospy.init_node('test_registration_3d',disable_signals=True)
    data_dir = "/home/joschu/Data/rope1"
    
    files = sorted(glob.glob(osp.join(data_dir,"*.txt")))
    
    distmat1 = np.zeros((len(files), len(files)))
    distmat2 = np.zeros((len(files), len(files)))

    for (i0, i1) in itertools.combinations(xrange(12),2):
        print colorize("comparing %s to %s"%(files[i0], files[i1]),'red',bold=True)
        rope0 = np.loadtxt(osp.join(data_dir,files[i0]))
        rope1 = np.loadtxt(osp.join(data_dir,files[i1]))
        f = registration.tps_rpm(rope0, rope1, plotting=True, reg_init=1, reg_final=.1, n_iter=21, verbose=False)
        distmat1[i0, i1] = f.cost
        distmat2[i0, i1] = f.corr_sum


    plt.figure(1)    
    plt.imshow(distmat1)
    plt.title("distances")
    plt.figure(2)
    plt.imshow(distmat2)
    plt.title("corr_sums")
    np.savez("cross_registration_results", distmat = distmat1, names = files)
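This script (like the pipeline and testing examples below) imports a different, string-based colorize that is called as colorize(text, 'red', bold=True). A plausible sketch of that variant, assuming standard ANSI SGR codes (the signature is inferred from the call sites, not taken from utils.colorize itself):

color2num = dict(gray=30, red=31, green=32, yellow=33,
                 blue=34, magenta=35, cyan=36, white=37)

def colorize(string, color, bold=False, highlight=False):
    # build an ANSI SGR sequence: foreground color, +10 selects the background variant
    attrs = [str(color2num[color] + (10 if highlight else 0))]
    if bold:
        attrs.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attrs), string)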
Example #6
    def handle(self, *args):
        arg = args[0] if args else ''
        titles = self.client.genres_titles
        titles_keys = sorted(titles.keys())

        header = render(
            '\n' + self.INDENT +
            '{{y}}--- GENRES ----------------------------------------------------{{e}}'
            + '\n\n' + self.INDENT)

        footer = render(
            '\n\n' + self.INDENT +
            '{{y}}Start listening by typing{{e}} play {genre} {{y}}command: {{e}}play kpop\n'
        )

        if arg == 'withintro':
            header = self.intro + header
            footer = render(
                '\n' + self.INDENT +
                '{{l}}... {{e-y}}Show more available genres via{{e}} genres {{y}}command{{e}}'
            ) + footer

            titles_keys = titles_keys[:5]  # Trim titles

        if not titles:
            return header + \
                   colorize(Colors.RED, 'Genres list is empty. Seems API isn\'t available. Please, try again later.\n')

        return header + ('\n' + self.INDENT).join(
            colorize(Colors.LIME, k + ' - ') +
            colorize(Colors.LIME, ', ').join(titles[k])
            for k in titles_keys) + footer
Example #7
 def handle(self, *args):
     if self.player:
         if self.player.is_playing:
             self.player.pause()
             return self.INDENT + colorize(Colors.GREEN, 'Track paused.')
         elif self.player.is_paused:
             return self.INDENT + colorize(Colors.RED,
                                           'Track already paused.')
     return self.INDENT + colorize(Colors.RED, 'No active players found.')
Example #8
    def _spectrogramImageSummary(self):
        output = tf.transpose(self._architecture.output()[0])
        target = tf.transpose(self._architecture.target()[0])
        total = tf.transpose(
            tf.concat([self._architecture.input()[0, :, :, 0],
                       self._architecture.output()[0],
                       self._architecture.input()[0, :, :, 1]], axis=0))

        return tf.summary.merge([tf.summary.image("Original", [colorize(target)]),
                                tf.summary.image("Generated", [colorize(output)]),
                                tf.summary.image("Complete", [colorize(total)])])
Example #9
 def handle(self, *args):
     if self.player:
         if self.player.is_playing:
             return self.INDENT + colorize(Colors.RED,
                                           'Track is already playing.')
         elif self.player.is_paused:
             self.player.play()
             return self.INDENT + colorize(
                 Colors.BLUE,
                 '\u25B6 ' + self.client.active_station['name'])
     return self.INDENT + colorize(Colors.RED, 'No active players found.')
Example #10
    def handle(self, *args):
        arg = args[0] if args else ''

        if not arg:
            if self.player and self.player.is_paused:
                self.player.play()
                return self.INDENT + colorize(
                    Colors.BLUE,
                    '\u25B6 ' + self.client.active_station['name'])

            self.stdout_print(self.INDENT +
                              colorize(Colors.GRAY, 'Pick random genre...'))
            arg = random.choice(
                [genre.get('title', '') for genre in self.client.genres])

        genre = self.client.search_genre(arg)
        genre_id = genre.get('id') if genre else None

        if genre_id is None:
            return self.INDENT + colorize(Colors.RED,
                                          'Genre ') + arg + colorize(
                                              Colors.RED, ' not found.')

        self.stdout_print(self.INDENT + colorize(Colors.GREEN, 'Tuning in...'))
        self.stdout_print(self.INDENT +
                          colorize(Colors.GREEN, 'Starting genre: ') +
                          genre.get('title', ''))

        num_of_tries = 0
        while num_of_tries < 3:
            num_of_tries += 1
            stream = self.client.get_stream(genre_id,
                                            renew_active_station=True)

            if not stream:
                return self.INDENT + colorize(
                    Colors.RED,
                    'No active stations found... Please, try another genre.')

            if self.player:
                self.player.stop()
            self.player = Player(stream)
            self.player.play()

            num_of_checks = 0
            while num_of_checks < 5:
                num_of_checks += 1
                time.sleep(1)
                if self.player.is_playing:
                    return self.INDENT + colorize(
                        Colors.BLUE,
                        '\u25B6 ' + self.client.active_station['name'])
        return self.INDENT + colorize(
            Colors.RED,
            'No active stations found... Please, try another genre.')
Example #11
def processCroppedImages():
    uploaded_files = request.files.getlist("files")
    print(uploaded_files)
    features = []
    for file in uploaded_files:
        image_buffer = file.read()
        # convert string data to numpy array
        np_image = np.frombuffer(image_buffer, dtype=np.uint8)
        # convert numpy array to image
        image = cv2.imdecode(np_image, flags=1)

        labels = cf.classify(image)
        color_length = len(labels)

        if color_length != 0:  # if no labels are identified then don't check for color
            colors = cl.colorize(image, color_length)
            color_values = []  # defined before the loop so it accumulates one entry per color
            for color in colors:
                r, g, b, name = colours[all_colors.index(color)]
                color_values.append({
                    "hex": rgb2hex(r, g, b),
                    "rgb": {
                        "r": r,
                        "g": g,
                        "b": b,
                        "a": 1
                    }
                })
            features.append({"labels": labels, "colors": color_values})

    return jsonify({"success": True, "features": features}), 200
Example #12
    def handle(self, *args):
        if self.player:
            arg = args[0] if args else ''

            if not arg:
                return self.INDENT + colorize(Colors.GREEN,
                                              'Current volume is ') + str(
                                                  self.player.get_volume())

            try:
                self.player.set_volume(int(arg))
            except ValueError:
                return self.INDENT + colorize(Colors.RED, 'Volume value ') + arg + \
                       colorize(Colors.RED, ' isn\'t valid.')
            return self.INDENT + colorize(Colors.GREEN, 'Set volume to ') + arg
        return self.INDENT + colorize(Colors.RED, 'No active players found.')
Example #13
def batch_to_img(b):
    b = b.astype(np.float32)
    b = np.sum(b, axis=3, keepdims=False)
    b = np.reshape(
        b, [batch_size, cfg.VOXVOX_GRID_SIZE[0] * cfg.VOXVOX_GRID_SIZE[1], 1])
    b = np.transpose(b, axes=[1, 0, 2])
    return colorize(b)[np.newaxis, ...]
Example #14
    def _spectrogramImageSummary(self):
        complexOutput = self._architecture.output()[0]
        outputSpectrogram = tf.sqrt(
            tf.reduce_sum(tf.square(complexOutput), axis=-1))

        complexTarget = self._architecture.target()[0]
        targetSpectrogram = tf.sqrt(
            tf.reduce_sum(tf.square(complexTarget), axis=-1))

        complexLeft = self._architecture.input()[0, :, :, 0:2]
        leftSpectrogram = tf.sqrt(
            tf.reduce_sum(tf.square(complexLeft), axis=-1))

        complexRight = self._architecture.input()[0, :, :, 2:4]
        rightSpectrogram = tf.sqrt(
            tf.reduce_sum(tf.square(complexRight), axis=-1))

        complexForward = self._architecture._forwardPrediction[0]
        forwardSpectrogram = tf.sqrt(
            tf.reduce_sum(tf.square(complexForward), axis=-1))

        complexBackward = self._architecture._backwardPrediction[0]
        backwardSpectrogram = tf.sqrt(
            tf.reduce_sum(tf.square(complexBackward), axis=-1))

        totalSpectrogram = tf.transpose(
            tf.concat([leftSpectrogram, outputSpectrogram, rightSpectrogram],
                      axis=0))

        frontPrediction = tf.transpose(
            tf.concat([leftSpectrogram, forwardSpectrogram, rightSpectrogram],
                      axis=0))

        backPrediction = tf.transpose(
            tf.concat([leftSpectrogram, backwardSpectrogram, rightSpectrogram],
                      axis=0))

        original = tf.transpose(
            tf.concat([leftSpectrogram, targetSpectrogram, rightSpectrogram],
                      axis=0))

        return tf.summary.merge([
            tf.summary.image("Original", [colorize(original)]),
            tf.summary.image("Forward", [colorize(frontPrediction)]),
            tf.summary.image("Backward", [colorize(backPrediction)]),
            tf.summary.image("Complete", [colorize(totalSpectrogram)])
        ])
Example #15
File: pipeline.py, Project: benkehoe/python
def execute_series(pipeline, dry_run=False):
    nodedict = pipeline.graph.node
    ordered_progs = [node for node in nx.topological_sort(pipeline.graph) if nodedict[node]["type"] == "program"]
    for prog in ordered_progs:
        command = nodedict[prog]["command"]
        item2status = pipeline.get_all_status()
        if pipeline.products_already_made(prog, item2status):
            print "next:", colorize("skipping %s"%prog, "red")
            logging.info("skipping %s",prog)
            continue        
        print colorize(command,"red")
        logging.info(command)
        raw_input("press enter to continue")        
        if not dry_run: 
            child = subprocess.Popen(command.split(), env=pipeline.env)
            try:
                interrupted=False
                while child.poll() is None: sleep(.1)
            except KeyboardInterrupt:
                interrupted=True
            if not interrupted and child.returncode != 0:
                raise subprocess.CalledProcessError(child.returncode, command)
Example #16
class Fm(cmd.Cmd):
    INDENT = ' ' * 4

    prompt = colorize(Colors.LIME, '$ fm ')
    intro = render("""
                 _   ___
     ___ _____ _| | |  _|_____
    |  _|     | . |_|  _|     |
    |___|_|_|_|___|_|_| |_|_|_|
    ---------------------------------------------------------------
    {{y}}Welcome to cmd.fm! Use{{e}} play {{y}}command to begin listening.
    For example:{{e}} play chillout{{y}}, {{e}}play dubstep {{y}}etc...
    {{g}}You can use{{e}} help {{g}}command to see all cmd.fm commands.{{e}}
    """)

    @classmethod
    def _bind_handler(cls, command):  # `command` avoids shadowing the stdlib cmd module
        def fn(self, *args):
            self.stdout_print(command.handle(self, *args))

        setattr(cls, 'do_' + command.name, fn)

    @classmethod
    def _bind_help(cls, command):
        def fn(self, *args):
            self.stdout_print(command.help())

        setattr(cls, 'help_' + command.name, fn)

    def __init__(self, client=None, test=False, *args, **kwargs):
        for command in commands:
            Fm._bind_handler(command)
            Fm._bind_help(command)

        self.client = client
        self.player = None
        cmd.Cmd.__init__(self, *args, **kwargs)
        self.commands = commands
        if not test:
            self.intro = self.onecmd('genres withintro')

    def stdout_print(self, text, end='\n'):
        self.stdout.write(text + end)

    def default(self, arg):
        self.stdout_print(self.INDENT +
                          colorize(Colors.RED, 'Unknown command ') + arg)

    def emptyline(self):
        # Do not repeat last used command when user entered empty line
        pass
Example #17
File: pipeline.py, Project: benkehoe/python
def execute_parallel(pipeline, lifetime, max_lag, noclean = False):
    nodedict = pipeline.graph.node
    target_topic = pipeline.get_target_topic()
    prog2child = {}
    remaining_progs = pipeline.get_programs()
    #plot = PipelinePlot(pipeline)

    try:
        while True:
            item2status = pipeline.get_all_status()
            item2done = pipeline.get_all_doneness(item2status)
            remaining_progs = [prog for prog in remaining_progs if not pipeline.products_already_made(prog, item2done) or pipeline.makes_topic(prog)]
            for prog in remaining_progs[:]:  # iterate over a copy; items are removed inside the loop
                if pipeline.ready_to_run(prog, item2status):
                    remaining_progs.remove(prog)
                    command = nodedict[prog]["command"]
                    print colorize(command, "red")                
                    logging.info(command)
                    child = subprocess.Popen(command.split(), env=pipeline.env)
                    prog2child[prog] = child
            for (prog, child) in prog2child.items():
                if child.poll() is not None and child.returncode != 0:
                    print colorize("%s failed"%prog, "red")
                    logging.error("%s failed",prog)
                    for child in prog2child.values():
                        if child.poll() is None: child.terminate()
                    return
            if not noclean: pipeline.cleanup_all(lifetime)
            pipeline.throttle(target_topic, item2status, max_lag)
            #plot.draw()
            sleep(.1)
    except Exception:
        traceback.print_exc()
        for child in prog2child.values():
            if child.poll() is None: child.terminate()
        return
Example #18
File: testing.py, Project: benkehoe/python
def test_all(stop=False):
    nPass,nFail = 0,0
    for (name,func) in TEST_FUNCS.items():
        print colorize("function: %s"%name,"green")
        try:
            t_start = time()
            func()
            t_elapsed = time() - t_start
            print colorize("PASSED (%.3f sec)"%t_elapsed,"blue")
            nPass += 1
        except Exception:    
            traceback.print_exc(file=sys.stdout)
            if stop: raise
            print colorize("FAILED","red")
            nFail += 1
            
            
    print "%i passed, %i failed"%(nPass,nFail)
Example #19
def predict_step(model, batch, anchors, cfg, params, summary=False, vis=False):

  @tf.function
  def distributed_predict_step():
    return model.strategy.run(model._predict_step, args=(batch["feature_buffer"], batch["coordinate_buffer"]))

  tag = batch["tag"].numpy().astype(str)
  if summary or vis:
    batch_gt_boxes3d = label_to_gt_box3d(
        batch["labels"].numpy().astype(str), cls=cfg.DETECT_OBJECT, coordinate='lidar')
  print('predict', tag)

  res = distributed_predict_step()
  if model.strategy.num_replicas_in_sync > 1:
    probs, deltas = tf.concat(res[0].values, axis=0).numpy(), tf.concat(res[1].values, axis=0).numpy()
  else:
    probs, deltas = res[0].numpy(), res[1].numpy()
  batch_boxes3d = delta_to_boxes3d(
        deltas, anchors, coordinate='lidar')
  batch_boxes2d = batch_boxes3d[:, :, [0, 1, 4, 5, 6]]
  batch_probs = probs.reshape((params["batch_size"], -1))

  # NMS
  ret_box3d = []
  ret_score = []
  for batch_id in range(params["batch_size"]):
    # remove box with low score
    ind = np.where(batch_probs[batch_id, :] >= cfg.RPN_SCORE_THRESH)[0]
    tmp_boxes3d = batch_boxes3d[batch_id, ind, ...]
    tmp_boxes2d = batch_boxes2d[batch_id, ind, ...]
    tmp_scores = batch_probs[batch_id, ind].astype(np.float32)
    # TODO: if possible, use rotate NMS
    boxes2d = corner_to_standup_box2d(
        center_to_corner_box2d(tmp_boxes2d, coordinate='lidar')).astype(np.float32)
    ind = tf.image.non_max_suppression(boxes2d, tmp_scores,
                                       max_output_size=cfg.RPN_NMS_POST_TOPK,
                                       iou_threshold=cfg.RPN_NMS_THRESH)
    ind = ind.numpy()
    tmp_boxes3d = tmp_boxes3d[ind, ...]
    tmp_scores = tmp_scores[ind]
    ret_box3d.append(tmp_boxes3d)
    ret_score.append(tmp_scores)

  ret_box3d_score = []
  for boxes3d, scores in zip(ret_box3d, ret_score):
    ret_box3d_score.append(np.concatenate([np.tile(cfg.DETECT_OBJECT, len(boxes3d))[:, np.newaxis],
                                                boxes3d, scores[:, np.newaxis]], axis=-1))
  
  img = 255. * batch["img"].numpy()  # TensorFlow scales images to [0, 1] on read; rescale to [0, 255]
  if summary:
    # only summarize the first item in the batch
    cur_tag = tag[0]
    P, Tr, R = load_calib( os.path.join( cfg.CALIB_DIR, cur_tag + '.txt' ) )

    front_image = draw_lidar_box3d_on_image(img[0], ret_box3d[0], ret_score[0],
                                                  batch_gt_boxes3d[0], P2=P, T_VELO_2_CAM=Tr, R_RECT_0=R)
          
    n_points = batch["num_points"][0].numpy()
    lidar = batch["lidar"][0][0:n_points,].numpy()
    bird_view = lidar_to_bird_view_img(lidar, factor=cfg.BV_LOG_FACTOR)
              
    bird_view = draw_lidar_box3d_on_birdview(bird_view, ret_box3d[0], ret_score[0],
                                                    batch_gt_boxes3d[0], factor=cfg.BV_LOG_FACTOR, P2=P, T_VELO_2_CAM=Tr, R_RECT_0=R)

    heatmap = colorize(probs[0, ...], cfg.BV_LOG_FACTOR)
    return {"tag":tag, "scores":ret_box3d_score, "front_image":tf.expand_dims(front_image, axis=0), 
            "bird_view":tf.expand_dims(bird_view, axis=0), "heatmap":tf.expand_dims(heatmap, axis=0)}

  if vis:
    front_images, bird_views, heatmaps = [], [], []
    for i in range(len(img)):
      cur_tag = tag[i]
      n_points = batch["num_points"][i].numpy()
      lidar = batch["lidar"][i][0:n_points,].numpy()
      P, Tr, R = load_calib( os.path.join( cfg.CALIB_DIR, cur_tag + '.txt' ) )
              
      front_image = draw_lidar_box3d_on_image(img[i], ret_box3d[i], ret_score[i],
                                        batch_gt_boxes3d[i], P2=P, T_VELO_2_CAM=Tr, R_RECT_0=R)
                                        
      bird_view = lidar_to_bird_view_img(lidar, factor=cfg.BV_LOG_FACTOR)
                                        
      bird_view = draw_lidar_box3d_on_birdview(bird_view, ret_box3d[i], ret_score[i],
                                        batch_gt_boxes3d[i], factor=cfg.BV_LOG_FACTOR, P2=P, T_VELO_2_CAM=Tr, R_RECT_0=R)

      heatmap = colorize(probs[i, ...], cfg.BV_LOG_FACTOR)

      front_images.append(front_image)
      bird_views.append(bird_view)
      heatmaps.append(heatmap)
          
    return {"tag":tag, "scores":ret_box3d_score, "front_image":front_images, "bird_view":bird_views, "heatmap":heatmaps}

  return { "tag":tag, "scores":ret_box3d_score}
Example #20
    def writeSummary(self, batch_idx, real_spectrograms,
                     generated_spectrograms, fake_spectrograms, fake_sounds,
                     real_sounds, sampling_rate):
        for summaryName in self._tracked_scalars:
            self._summary_writer.add_scalar(
                summaryName,
                self._tracked_scalars[summaryName] / self._writeInterval,
                global_step=batch_idx)
        self._tracked_scalars = {}

        music_analysis_fake_signal = np.zeros([7, len(fake_sounds)])
        music_analysis_real_signal = np.zeros([7, len(real_sounds)])
        for index, (fake, real) in enumerate(zip(fake_sounds, real_sounds)):
            music_analysis_fake_signal[:, index] = self.musicAnalysis(fake)
            music_analysis_real_signal[:, index] = self.musicAnalysis(real)

        self._summary_writer.add_scalar("MusicAnalysis/Real_beats_confidence",
                                        np.mean(music_analysis_real_signal[0]),
                                        global_step=batch_idx)
        self._summary_writer.add_scalar("MusicAnalysis/Real_dissonance_mean",
                                        np.mean(music_analysis_real_signal[1]),
                                        global_step=batch_idx)
        self._summary_writer.add_scalar("MusicAnalysis/Real_dissonance_std",
                                        np.mean(music_analysis_real_signal[2]),
                                        global_step=batch_idx)
        self._summary_writer.add_scalar(
            "MusicAnalysis/Real_inharmonicity_mean",
            np.mean(music_analysis_real_signal[3]),
            global_step=batch_idx)
        self._summary_writer.add_scalar("MusicAnalysis/Real_inharmonicity_std",
                                        np.mean(music_analysis_real_signal[4]),
                                        global_step=batch_idx)
        self._summary_writer.add_scalar(
            "MusicAnalysis/Real_tuning_frequency_mean",
            np.mean(music_analysis_real_signal[5]),
            global_step=batch_idx)
        self._summary_writer.add_scalar(
            "MusicAnalysis/Real_tuning_frequency_std",
            np.mean(music_analysis_real_signal[6]),
            global_step=batch_idx)

        self._summary_writer.add_scalar("MusicAnalysis/Fake_beats_confidence",
                                        np.mean(music_analysis_fake_signal[0]),
                                        global_step=batch_idx)
        self._summary_writer.add_scalar("MusicAnalysis/Fake_dissonance_mean",
                                        np.mean(music_analysis_fake_signal[1]),
                                        global_step=batch_idx)
        self._summary_writer.add_scalar("MusicAnalysis/Fake_dissonance_std",
                                        np.mean(music_analysis_fake_signal[2]),
                                        global_step=batch_idx)
        self._summary_writer.add_scalar(
            "MusicAnalysis/Fake_inharmonicity_mean",
            np.mean(music_analysis_fake_signal[3]),
            global_step=batch_idx)
        self._summary_writer.add_scalar("MusicAnalysis/Fake_inharmonicity_std",
                                        np.mean(music_analysis_fake_signal[4]),
                                        global_step=batch_idx)
        self._summary_writer.add_scalar(
            "MusicAnalysis/Fake_tuning_frequency_mean",
            np.mean(music_analysis_fake_signal[5]),
            global_step=batch_idx)
        self._summary_writer.add_scalar(
            "MusicAnalysis/Fake_tuning_frequency_std",
            np.mean(music_analysis_fake_signal[6]),
            global_step=batch_idx)

        real_c = consistency((real_spectrograms - 1) * 25)
        fake_c = consistency((generated_spectrograms - 1) * 25)

        mean_R_Con, std_R_Con = real_c.mean(), real_c.std()
        mean_F_Con, std_F_Con = fake_c.mean(), fake_c.std()

        self._summary_writer.add_scalar("Gen/Reg",
                                        torch.abs(mean_R_Con - mean_F_Con),
                                        global_step=batch_idx)
        self._summary_writer.add_scalar("Gen/F_Con",
                                        mean_F_Con,
                                        global_step=batch_idx)
        self._summary_writer.add_scalar("Gen/F_STD_Con",
                                        std_F_Con,
                                        global_step=batch_idx)
        self._summary_writer.add_scalar("Gen/R_Con",
                                        mean_R_Con,
                                        global_step=batch_idx)
        self._summary_writer.add_scalar("Gen/R_STD_Con",
                                        std_R_Con,
                                        global_step=batch_idx)
        self._summary_writer.add_scalar("Gen/STD_diff",
                                        torch.abs(std_F_Con - std_R_Con),
                                        global_step=batch_idx)

        for index in range(4):
            self._summary_writer.add_image("images/Real_Image/" + str(index),
                                           colorize(real_spectrograms[index]),
                                           global_step=batch_idx)
            self._summary_writer.add_image("images/Fake_Image/" + str(index),
                                           colorize(fake_spectrograms[index],
                                                    -1, 1),
                                           global_step=batch_idx)
            self._summary_writer.add_audio('sounds/Gen/' + str(index),
                                           fake_sounds[index] /
                                           (np.abs(fake_sounds[index]).max()),
                                           global_step=batch_idx,
                                           sample_rate=sampling_rate)
            self._summary_writer.add_audio('sounds/Real/' + str(index),
                                           real_sounds[index] /
                                           (np.abs(real_sounds[index]).max()),
                                           global_step=batch_idx,
                                           sample_rate=sampling_rate)
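This PyTorch trainer calls colorize(spectrogram) and colorize(spectrogram, -1, 1), i.e. with optional explicit bounds, and feeds the result to add_image. A sketch under the same colormap-lookup assumption as the TensorFlow sketch above, returning the CHW float tensor that add_image expects:

import numpy as np
import torch
from matplotlib import cm

def colorize(value, vmin=None, vmax=None, cmap='viridis'):
    # move to numpy and normalize into [0, 1] using the given (or observed) bounds
    x = value.detach().cpu().numpy().squeeze()
    vmin = x.min() if vmin is None else vmin
    vmax = x.max() if vmax is None else vmax
    x = np.clip((x - vmin) / (vmax - vmin), 0.0, 1.0)
    # colormap lookup yields HWC RGB; permute to CHW for add_image
    rgb = cm.get_cmap(cmap)(x)[..., :3]
    return torch.from_numpy(rgb).permute(2, 0, 1).float()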
Example #21
File: model.py, Project: LordLiang/veloNet
    def predict_step(self, session, data, summary=False, vis=False):
        tags = data[0]
        bev_maps = data[1]
        labels = data[2]
        batch_size = len(tags)
        if summary or vis:
            batch_gt_boxes3d = label_to_gt_box3d(labels, cls=self.cls)

        print('predict', tags)
        input_feed = {}
        input_feed[self.is_train] = False
        for idx in range(len(self.avail_gpus)):
            input_feed[self.bev_maps[idx]] = bev_maps[idx * self.single_batch_size:(idx + 1) * self.single_batch_size]

        output_feed = [self.prob_output, self.delta_output]
        probs, deltas = session.run(output_feed, input_feed)
        print('probs', probs.shape)
        print('deltas', deltas.shape)

        batch_boxes3d = delta_to_boxes3d(deltas, self.anchors)
        batch_boxes2d = batch_boxes3d[:, :, [0, 1, 4, 5, 6]]
        batch_probs = probs.reshape((len(self.avail_gpus) * self.single_batch_size, -1))

        # NMS
        print("NMS...")
        ret_box3d = []
        ret_score = []

        for batch_id in range(batch_size):
            # remove box with low score
            ind = np.where(batch_probs[batch_id, :] >= cfg.RPN_SCORE_THRESH)[0]
            tmp_boxes3d = batch_boxes3d[batch_id, ind, ...]
            tmp_boxes2d = batch_boxes2d[batch_id, ind, ...]
            tmp_scores = batch_probs[batch_id, ind]

            ind = box2d_rotate_nms(tmp_boxes2d,
                                   tmp_scores,
                                   max_output_size=cfg.RPN_NMS_POST_TOPK,
                                   iou_threshold=cfg.RPN_NMS_THRESH)

            tmp_boxes3d = tmp_boxes3d[ind, ...]
            tmp_scores = tmp_scores[ind]

            inf = float("inf")

            for i in range(len(ind)-1, -1, -1):
                if 0 in tmp_boxes3d[i, 3:6] or inf in tmp_boxes3d[i, 3:6]:
                    tmp_boxes3d = np.delete(tmp_boxes3d, i, 0)
                    tmp_scores = np.delete(tmp_scores, i, 0)

            ret_box3d.append(tmp_boxes3d)
            ret_score.append(tmp_scores)

        ret_box3d_score = []
        for boxes3d, scores in zip(ret_box3d, ret_score):
            ret_box3d_score.append(np.concatenate([np.tile(self.cls, len(boxes3d))[:, np.newaxis],
                                                   boxes3d, scores[:, np.newaxis]], axis=-1))
        if summary:
            # only summarize the first item in the batch
            cur_tag = tags[0]
            cur_img = imread(os.path.join(cfg.IMG_DIR, cur_tag + '.png'))
            P, Tr, R = load_calib(os.path.join(cfg.CALIB_DIR, cur_tag + '.txt'))

            front_image = draw_box3d_on_image(cur_img, ret_box3d[0], ret_score[0],
                                                    batch_gt_boxes3d[0], P2=P, T_VELO_2_CAM=Tr, R_RECT_0=R)

            bird_view = bev_maps[..., 7:10]


            bird_view = draw_lidar_box3d_on_birdview(bird_view, ret_box3d[0], ret_score[0],
                                                     batch_gt_boxes3d[0], factor=cfg.BV_LOG_FACTOR, P2=P,
                                                     T_VELO_2_CAM=Tr, R_RECT_0=R)

            heatmap = colorize(probs[0, ...], cfg.BV_LOG_FACTOR)

            ret_summary = session.run(self.predict_summary, {
                self.rgb: front_image[np.newaxis, ...],
                self.bv: bird_view[np.newaxis, ...],
                self.bv_heatmap: heatmap[np.newaxis, ...]
            })

            return tags, ret_box3d_score, ret_summary

        if vis:
            front_images, bird_views, heatmaps = [], [], []
            for i in range(batch_size):
                cur_tag = tags[i]
                P, Tr, R = load_calib(os.path.join(cfg.CALIB_DIR, cur_tag + '.txt'))

                bird_view = bev_maps[..., 7:10]

                bird_view = draw_lidar_box3d_on_birdview(bird_view, ret_box3d[i], ret_score[i],
                                                         batch_gt_boxes3d[i], factor=cfg.BV_LOG_FACTOR, P2=P,
                                                         T_VELO_2_CAM=Tr, R_RECT_0=R)

                heatmap = colorize(probs[i, ...], cfg.BV_LOG_FACTOR)

                bird_views.append(bird_view)
                heatmaps.append(heatmap)

            return tags, ret_box3d_score, bird_views, heatmaps

        return tags, ret_box3d_score
Example #22
 def default(self, arg):
     self.stdout_print(self.INDENT +
                       colorize(Colors.RED, 'Unknown command ') + arg)
Example #23
#!/usr/bin/env python
import sys
from os.path import exists
from time import sleep
import subprocess
from utils.colorize import colorize

command = sys.argv[1]
dependencies = sys.argv[2:]

while True:
    if all(exists(file) for file in dependencies):
        print colorize(command,'red')
        subprocess.check_call(command,shell=True)
        break
    else:
        sleep(.01)
Example #24
labelGetter = comm.FileGetter(args.label,"bmp",comm.SingleChannelImageMessage)
ropeInitPub = comm.FilePublisher(args.out,"txt")

prev_id = -1
while True:

    
    latest_id = getLastInd(args.label)
    print "latest id", latest_id
    if latest_id == prev_id:        
        sleep(.1)
        continue
    else:
        prev_id = latest_id    
    
    timePrevStart = time()

    xyz,bgr = pcdGetter.recv_id(latest_id).data
    label = labelGetter.recv_id(latest_id).data

    if label is None: raise Exception("could not read label file")
        
    try:
        xyzs,labels = initialize_rope(label,xyz, bgr,plotting=args.plotting)
        ropeInitPub.send(RopeInitMessage(data=(xyzs, labels)))    
        sleep(max(args.pause - (time() - timePrevStart),0))
    except Exception:
        print colorize("exception occurred in rope init:","red")
        traceback.print_exc()
    
Example #25
    def predict(self, data, probs, deltas, summary = False, vis = False):
        '''
        probs: (batch, 2, cfg.FEATURE_HEIGHT, cfg.FEATURE_WIDTH)
        deltas: (batch, 14, cfg.FEATURE_HEIGHT, cfg.FEATURE_WIDTH)
        '''
        tag = data[0]
        label = data[1]
        vox_feature = data[2]
        vox_number = data[3]
        vox_coordinate = data[4]
        img = data[5]
        lidar = data[6]

        batch_size, _, _, _ = probs.shape
        device = probs.device

        batch_gt_boxes3d = None
        if summary or vis:
            batch_gt_boxes3d = label_to_gt_box3d(label, cls = self.cls, coordinate = 'lidar')

        # Move to cpu and convert to numpy array
        probs = probs.cpu().detach().numpy()
        deltas = deltas.cpu().detach().numpy()

        # BOTTLENECK
        batch_boxes3d = delta_to_boxes3d(deltas, self.anchors, coordinate = 'lidar')
        batch_boxes2d = batch_boxes3d[:, :, [0, 1, 4, 5, 6]]
        batch_probs = probs.reshape((batch_size, -1))

        # NMS
        ret_box3d = []
        ret_score = []
        for batch_id in range(batch_size):
            # Remove box with low score
            ind = np.where(batch_probs[batch_id, :] >= cfg.RPN_SCORE_THRESH)[0]
            tmp_boxes3d = batch_boxes3d[batch_id, ind, ...]
            tmp_boxes2d = batch_boxes2d[batch_id, ind, ...]
            tmp_scores = batch_probs[batch_id, ind]

            # TODO: if possible, use rotate NMS
            boxes2d = corner_to_standup_box2d(center_to_corner_box2d(tmp_boxes2d, coordinate = 'lidar'))

            # 2D box index after nms
            ind, cnt = nms(torch.from_numpy(boxes2d).to(device), torch.from_numpy(tmp_scores).to(device),
                           cfg.RPN_NMS_THRESH, cfg.RPN_NMS_POST_TOPK)
            ind = ind[:cnt].cpu().detach().numpy()

            tmp_boxes3d = tmp_boxes3d[ind, ...]
            tmp_scores = tmp_scores[ind]
            ret_box3d.append(tmp_boxes3d)
            ret_score.append(tmp_scores)

        ret_box3d_score = []
        for boxes3d, scores in zip(ret_box3d, ret_score):
            ret_box3d_score.append(np.concatenate([np.tile(self.cls, len(boxes3d))[:, np.newaxis],
                                                   boxes3d, scores[:, np.newaxis]], axis = -1))

        if summary:
            # Only summarize the first one in a batch
            cur_tag = tag[0]
            P, Tr, R = load_calib(os.path.join(cfg.CALIB_DIR, cur_tag + '.txt'))

            front_image = draw_lidar_box3d_on_image(img[0], ret_box3d[0], ret_score[0],
                                                    batch_gt_boxes3d[0], P2 = P, T_VELO_2_CAM = Tr, R_RECT_0 = R)

            bird_view = lidar_to_bird_view_img(lidar[0], factor = cfg.BV_LOG_FACTOR)

            bird_view = draw_lidar_box3d_on_birdview(bird_view, ret_box3d[0], ret_score[0], batch_gt_boxes3d[0],
                                                     factor = cfg.BV_LOG_FACTOR, P2 = P, T_VELO_2_CAM = Tr, R_RECT_0 = R)

            heatmap = colorize(probs[0, ...], cfg.BV_LOG_FACTOR)

            ret_summary = [['predict/front_view_rgb', front_image[np.newaxis, ...]],  # [None, cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH, 3]
                           # [None, cfg.BV_LOG_FACTOR * cfg.INPUT_HEIGHT, cfg.BV_LOG_FACTOR * cfg.INPUT_WIDTH, 3]
                           ['predict/bird_view_lidar', bird_view[np.newaxis, ...]],
                           # [None, cfg.BV_LOG_FACTOR * cfg.FEATURE_HEIGHT, cfg.BV_LOG_FACTOR * cfg.FEATURE_WIDTH, 3]
                           ['predict/bird_view_heatmap', heatmap[np.newaxis, ...]]]

            return tag, ret_box3d_score, ret_summary

        if vis:
            front_images, bird_views, heatmaps = [], [], []
            for i in range(len(img)):
                cur_tag = tag[i]
                P, Tr, R = load_calib(os.path.join(cfg.CALIB_DIR, cur_tag + '.txt'))

                front_image = draw_lidar_box3d_on_image(img[i], ret_box3d[i], ret_score[i],
                                                        batch_gt_boxes3d[i], P2 = P, T_VELO_2_CAM = Tr, R_RECT_0 = R)

                bird_view = lidar_to_bird_view_img(lidar[i], factor = cfg.BV_LOG_FACTOR)

                bird_view = draw_lidar_box3d_on_birdview(bird_view, ret_box3d[i], ret_score[i], batch_gt_boxes3d[i],
                                                         factor = cfg.BV_LOG_FACTOR, P2 = P, T_VELO_2_CAM = Tr, R_RECT_0 = R)

                heatmap = colorize(probs[i, ...], cfg.BV_LOG_FACTOR)

                front_images.append(front_image)
                bird_views.append(bird_view)
                heatmaps.append(heatmap)

            return tag, ret_box3d_score, front_images, bird_views, heatmaps

        return tag, ret_box3d_score
Example #26
    verb_group["nargs"] = len(info["arg_names"])
    verb_group["arg_names"] = info["arg_names"]
    verb_group.create_group("tool_transform")
    if "tool_transform" in info:
        verb_group["tool_transform"]["origin"] = info["tool_transform"]["origin"]
        verb_group["tool_transform"]["rotation"] = info["tool_transform"]["rotation"]
    else:
        verb_group["tool_transform"]["origin"] = [0.,0.,0.]
        verb_group["tool_transform"]["rotation"] = [0., 0., 0., 1.]
        
    verb_group.create_group("demos")
verb_lib.close()

for demo_info in verb_demos:
    verb_lib = h5py.File(verb_lib_path, mode="r+")    
    verb = demo_info["verb"]
    verb_group = verb_lib[verb]
    demo_num = len(verb_lib[verb]["demos"])
    demo_name = "traj%.2i"%demo_num
    demo_path = "%s/demos/%i"%(verb, demo_num)
    verb_lib.close()
    cmd = " ".join(["python","db_from_bag.py",osp.join(data_dir,demo_info["bag_file"]),verb_lib_path, demo_path, demo_info["arms_used"]])
    print colorize(cmd,"red")
    subprocess.check_call(cmd, shell=True)
    verb_lib = h5py.File(verb_lib_path, mode="r+")    
    clouds = np.load(osp.join(data_dir, demo_info["seg_file"]))
    verb_lib[demo_path].create_group("object_clouds")
    for (i,arg_name) in enumerate(verb_info[verb]["arg_names"]):
        verb_lib[demo_path]["object_clouds"][str(i)] = clouds[i].astype('float32')
    verb_lib.close()
Example #27
    def _spectrogramImageSummary(self):
        originalImage = tf.expand_dims(colorize(tf.transpose(self._architecture.input()[0])), 0)
        generatedImage = tf.expand_dims(colorize(tf.transpose(self._architecture.output()[0])), 0)

        return tf.summary.merge([tf.summary.image("Original", originalImage),
                                 tf.summary.image("Generated", generatedImage)])
Example #28
for i0 in xrange(6):
    rope0 = ropes[i0]
    n0 = len(rope0)
    #pairs = zip(xrange(n0), np.random.randint(0,n0,n0))
    dists0 = dists[i0]

    for i1 in xrange(6):
        rope1 = ropes[i1]

        dists1 = dists[i1]
        
        n1 = len(rope1)
        
        
        print colorize("comparing %s to %s"%(seg_names[i0], seg_names[i1]), "red")
        cost = recognition.calc_match_score(rope0, rope1, dists0, dists1)
            
        results.append((i0, i1, cost))
        print i0, i1, cost
        
distmat = np.zeros((6,6))
for (i0, i1, cost) in results:
    distmat[i0, i1] = cost
distmat[xrange(6),xrange(6)] = np.nan
print distmat.argmin(axis=0)

a,b = np.meshgrid([0,3,1,4,2,5],[0,3,1,4,2,5])
distmat_rearr = distmat[a,b]
distmat_rearr[range(6), range(6)] = np.nan
plt.imshow(distmat_rearr,interpolation='nearest',cmap='gray')
Example #29
 def handle(self, *args):
     if self.player:
         self.player.mute()
         return self.INDENT + colorize(Colors.GREEN, 'Track muted.')
     return self.INDENT + colorize(Colors.RED, 'No active players found.')