def compute(target_word, language):
    error = ""
    if language != "":
        dm_dict = language_codes[language][0]
        dictionary = language_codes[language][2]
    else:
        # fall back to English when no language is given
        dm_dict = language_codes["English"][0]
        dictionary = language_codes["English"][2]
        logging.exception(language)
    if target_word not in dm_dict:
        # fall back to the vector for "error" (assumed present in the space)
        target_word = "error"
        error = "<img src='http://www.openmeaning.org/static/sad-robot.png' width='100%'/><br><b>Sorry, the computer has not learnt this word yet. Try again!</b>"
    neighbours = sim_to_matrix(dm_dict, dm_dict[target_word], 50)

    # make the figure: project the 10 nearest neighbours to 2-D with PCA
    m = []
    labels = []
    for n in neighbours[:10]:
        labels.append(n)
        m.append(dm_dict[n])
    pca.fit(m)
    m_2d = pca.transform(m)
    figdata_png = make_figure(m_2d, labels)

    # return more neighbours, with dictionary links
    neighbour_links = ""
    for n in neighbours:
        new_link = "<a href='https://" + dictionary + ".wiktionary.org/wiki/" + n + "'>" + n + "</a> | "
        neighbour_links += new_link
    return figdata_png, error, neighbour_links

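# `sim_to_matrix` is not shown in this snippet. A minimal sketch of the
# assumed behaviour, under the assumption that dm_dict maps words to 1-D
# numpy vectors: rank every word in the semantic space by cosine similarity
# to the query vector and return the `n` nearest words.
import numpy as np

def sim_to_matrix(dm_dict, vec, n):
    """Return the n words whose vectors are most cosine-similar to vec."""
    def cosine(a, b):
        return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
    ranked = sorted(dm_dict, key=lambda w: cosine(dm_dict[w], vec), reverse=True)
    return ranked[:n]
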
def send_figure_to_graph(
    annotations_table_data, annotation_type, image_files_data, annotations_store
):
    if annotations_table_data is not None:
        filename = image_files_data["files"][image_files_data["current"]]
        # convert table rows to shapes understood by fig.update_layout
        fig_shapes = [table_row_to_shape(sh) for sh in annotations_table_data]
        debug_print("fig_shapes:", fig_shapes)
        debug_print(
            "annotations_store[%s]['shapes']:" % (filename,),
            annotations_store[filename]["shapes"],
        )
        # find the shapes that are new
        new_shapes_i = []
        old_shapes_i = []
        for i, sh in enumerate(fig_shapes):
            if not shape_in(annotations_store[filename]["shapes"])(sh):
                new_shapes_i.append(i)
            else:
                old_shapes_i.append(i)
        # add timestamps to the new shapes
        for i in new_shapes_i:
            fig_shapes[i]["timestamp"] = time_passed(annotations_store["starttime"])
        # find the old shapes and look up their timestamps
        for i in old_shapes_i:
            old_shape_i = index_of_shape(
                annotations_store[filename]["shapes"], fig_shapes[i]
            )
            fig_shapes[i]["timestamp"] = annotations_store[filename]["shapes"][
                old_shape_i
            ]["timestamp"]
        shapes = fig_shapes
        debug_print("shapes:", shapes)
        fig = make_figure(filename, mode=DEFAULT_FIG_MODE)
        fig.update_layout(
            {
                "shapes": [shape_data_remove_timestamp(sh) for sh in shapes],
                "newshape.line.color": color_dict[annotation_type],
                # reduce space between image and graph edges
                "margin": dict(l=0, r=0, b=0, t=0, pad=4),
            }
        )
        annotations_store[filename]["shapes"] = shapes
        return (fig, annotations_store, [{"Timestamp": s["timestamp"]} for s in shapes])
    return dash.no_update

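# `shape_in` and `index_of_shape` are helpers defined elsewhere in the app.
# A plausible sketch, assuming shapes are dicts whose geometry lives in a few
# fixed keys and whose stored copies carry an extra "timestamp" entry that
# must be ignored when comparing:
def shape_cmp(s0, s1):
    """Compare two shapes on the keys that define their geometry."""
    keys = ("x0", "x1", "y0", "y1", "line")
    return all(s0.get(k) == s1.get(k) for k in keys)

def shape_in(shapes):
    """Return a predicate that tests geometric membership in `shapes`."""
    return lambda s: any(shape_cmp(s, s_) for s_ in shapes)

def index_of_shape(shapes, shape):
    """Index of the first geometric match of `shape` in `shapes`."""
    for i, s_ in enumerate(shapes):
        if shape_cmp(s_, shape):
            return i
    raise ValueError("Shape not found in list")
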
def radio_pressed(image_files, mode, snap, store_data):
    """
    When radio button changed OR current file changed, update figure.
    """
    ctx = dash.callback_context
    if not ctx.triggered:
        button_id = 'No clicks yet'
    else:
        button_id = ctx.triggered[0]['prop_id'].split('.')[0]
    print(button_id)
    path = None
    if image_files:
        filename = image_files['files'][image_files['current']]
    else:
        filename = filelist[0]
    fig = make_figure(filename, mode=mode, dragmode='drawclosedpath')
    # restore the saved shapes for the file actually being displayed
    fig['layout']['shapes'] = store_data[filename]['shapes']
    fig['layout']['newshape']['line']['color'] = color_dict['car']
    fig['layout']['uirevision'] = filename
    short_filename = os.path.join('assets', os.path.basename(filename))
    if button_id == 'snap':
        # resample the last drawn path to 80 evenly spaced points
        path = path_to_indices(store_data[filename]['shapes'][-1]['path'])
        t = np.linspace(0, 1, len(path))
        t_full = np.linspace(0, 1, 80)
        interp_row = interp1d(t, path[:, 0])
        interp_col = interp1d(t, path[:, 1])
        path = np.array([interp_row(t_full), interp_col(t_full)]).T
    if path is not None:
        # snap the hand-drawn contour to the image with an active contour model
        img = io.imread(short_filename, as_gray=True)
        snake = segmentation.active_contour(
            filters.gaussian(img, 3),
            path[:, ::-1],
            alpha=0.002,
            beta=0.001,
            # gamma=0.001,
            coordinates='rc')
        path = indices_to_path(snake[:, ::-1])
        new_shape = dict(store_data[filename]['shapes'][-1])
        new_shape['path'] = path
        new_shape['line']['color'] = 'orange'
        fig['layout']['shapes'] += (new_shape, )
    return fig

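# `path_to_indices` and `indices_to_path` are assumed helpers that convert
# between the SVG path string plotly stores for a closed freeform shape
# (e.g. "M130.5,72.1L131.2,74.9...Z") and an (N, 2) coordinate array.
# A minimal sketch:
import numpy as np

def path_to_indices(path):
    """SVG path string 'Mx,yLx,y...Z' -> (N, 2) float array."""
    coords = path.lstrip('M').rstrip('Z').split('L')
    return np.array([c.split(',') for c in coords], dtype=float)

def indices_to_path(indices):
    """(N, 2) array -> closed SVG path string."""
    return 'M' + 'L'.join('{},{}'.format(x, y) for x, y in indices) + 'Z'
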
def main(config, resume):
    # setup data_loader instances
    data_loader = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['file'],
        config['data_loader']['args']['batch_size'],
        config['data_loader']['args']['max_seq_len'],
        config['data_loader']['args']['num_bands'],
        shuffle=False,
        validation_split=0.0,
        num_workers=2)

    # build model architecture
    model = get_instance(module_arch, 'arch', config)
    model.summary()

    # get function handles of loss and metrics
    loss_fn = getattr(module_loss, config['loss'])
    metric_fns = [getattr(module_metric, met) for met in config['metrics']]

    # load state dict
    checkpoint = torch.load(resume)
    state_dict = checkpoint['state_dict']
    if config['n_gpu'] > 1:
        model = torch.nn.DataParallel(model)
    model.load_state_dict(state_dict)

    # prepare model for testing
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    model.eval()

    total_loss = 0.0
    total_metrics = torch.zeros(len(metric_fns))

    # directory for saving images
    save_dir = os.path.split(resume)[0]
    save_dir = os.path.join(save_dir, 'test')
    ensure_dir(save_dir)

    with torch.no_grad():
        for i, (data, target) in enumerate(tqdm(data_loader)):
            data, target = data.to(device), target.to(device)
            # new hidden state for each batch
            states = model.init_hidden(data.size(0))
            output = model(data, states)

            # prepare figures for display
            gt = target.cpu().numpy()  # (batch, time, :)
            mu = output['pred_mean'][0].detach().cpu().numpy()
            for b in range(gt.shape[0]):
                gt_ = gt[b].reshape(gt.shape[1], 32, 3)
                mu_ = mu.reshape(mu.shape[0], 32, 3)
                # one ground-truth/prediction figure pair per joint
                for j in range(32):
                    img_prefix = os.path.join(
                        save_dir,
                        'idx_' + str(i * gt.shape[0] + b) + '_jt_' + str(j))
                    gt__ = gt_[:, j, :]
                    mu__ = mu_[:, j, :]
                    fig = make_figure(gt__)
                    fig.savefig(img_prefix + '_gt.png')
                    plt.close(fig)
                    fig = make_figure(mu__)
                    fig.savefig(img_prefix + '_pred.png')
                    plt.close(fig)

            # computing loss, metrics on test set
            loss = loss_fn(output, target)
            batch_size = data.shape[0]
            total_loss += loss.item() * batch_size
            # use a separate index so the batch index `i` is not shadowed
            for m_i, metric in enumerate(metric_fns):
                total_metrics[m_i] += metric(output, target) * batch_size

    n_samples = len(data_loader.sampler)
    log = {'loss': total_loss / n_samples}
    log.update({
        met.__name__: total_metrics[i].item() / n_samples
        for i, met in enumerate(metric_fns)
    })
    print(log)

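# A hypothetical entry point for the test script above. The pytorch-template
# convention this code appears to follow stores the training config inside
# the checkpoint, so only the checkpoint path needs to be supplied; treat
# this as a sketch under that assumption, not the project's actual CLI:
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Evaluate a trained model')
    parser.add_argument('-r', '--resume', type=str, required=True,
                        help='path to the checkpoint to evaluate')
    args = parser.parse_args()
    checkpoint_config = torch.load(args.resume)['config']
    main(checkpoint_config, args.resume)
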
    },
    'https://unpkg.com/[email protected]/build/grids-responsive-min.css',
    # 'https://unpkg.com/[email protected]/build/base-min.css',
    ],
)

filelist = [
    app.get_asset_url('lung_ct.jpg'),
    app.get_asset_url('mri_head.jpg'),
    app.get_asset_url('astronaut.png'),
    app.get_asset_url('rocket.jpg')
]

server = app.server

fig = make_figure(filelist[0], mode='layout', dragmode='drawclosedpath')
fig['layout']['newshape']['line']['color'] = color_dict['car']

app.layout = html.Div(children=[
    html.Div(children=[
        html.H3("Outline the contour of objects"),
        html.Button('Magic scissors', id='snap'),
        html.H5("How to display images", style={'margin-top': '2em'}),
        dcc.RadioItems(
            id='mode',
            options=[
                {'label': 'trace', 'value': 'trace'},
                {'label': 'layout', 'value': 'layout'},
            ],

        new_shape[k] = shape[k]
    return new_shape


external_stylesheets = ["assets/style.css", "assets/app_bounding_box_style.css"]

app = dash.Dash(__name__, external_stylesheets=external_stylesheets)

filelist = [
    app.get_asset_url("driving.jpg"),
    app.get_asset_url("professional-transport-autos-bridge-traffic-road-rush-hour.jpg"),
    app.get_asset_url("rocket.jpg"),
]

server = app.server

fig = make_figure(filelist[0], mode=DEFAULT_FIG_MODE)
fig.update_layout(
    {
        "newshape.line.color": color_dict[DEFAULT_ATYPE],
        "margin": dict(l=0, r=0, b=0, t=0, pad=4),
    }
)

app.layout = html.Div(
    id="main",
    children=[
        # Banner display
        html.Div(
            id="banner",
            children=[
                html.H1("Bounding Box Classification App", id="title"),

def _train_epoch(self, epoch):
    """
    Training logic for an epoch

    :param epoch: Current training epoch.
    :return: A log that contains all information you want to save.

    Note:
        If you have additional information to record, for example:
            > additional_log = {"x": x, "y": y}
        merge it with log before return. i.e.
            > log = {**log, **additional_log}
            > return log
        The metrics in log must have the key 'metrics'.
    """
    self.model.train()

    total_loss = 0
    total_metrics = np.zeros(len(self.metrics))
    for batch_idx, (data, target) in enumerate(self.data_loader):
        data, target = data.to(self.device), target.to(self.device)

        # new hidden state for each batch; data is in (batch, time, :) format
        states = self.model.init_hidden(data.size(0))

        self.optimizer.zero_grad()
        output = self.model(data, states)
        loss = self.loss(output, data.view(data.size(0), data.size(1), -1, 96))
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=10.0)
        self.optimizer.step()

        self.writer.set_step((epoch - 1) * len(self.data_loader) + batch_idx)
        self.writer.add_scalar('loss', loss.item())
        total_loss += loss.item()
        total_metrics += self._eval_metrics(output, target)

        if self.verbosity >= 2 and batch_idx % self.log_step == 0:
            self.logger.info('Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}'.format(
                epoch,
                batch_idx * self.data_loader.batch_size,
                self.data_loader.n_samples,
                100.0 * batch_idx / len(self.data_loader),
                loss.item()))

        if batch_idx == 0:
            # prepare figures for display: plot the first joint of the first
            # sample in the batch
            gt = target.cpu().numpy()  # (batch, time, :)
            gt = gt[0].reshape(gt.shape[1], 32, 3)
            gt = gt[:, 0, :]
            self.writer.add_figure(
                'ground_truth/' + str(epoch) + '/' + str(batch_idx),
                make_figure(gt))

            # pick the mean as the predicted value, since it is the most
            # probable value at any time step and predictions at all time
            # steps are independent given the latent variable(s)
            mu = sum(output['pred_means'])[0].detach().cpu().numpy()
            mu = mu.reshape(mu.shape[0], 32, 3)
            mu = mu[:, 0, :]
            # logvar = output['pred_logvar'][0].detach().cpu().numpy()
            # logvar = logvar.reshape(logvar.shape[0], 32, 3)
            # logvar = logvar[:, 0, :]
            # epsilon = np.random.randn(*mu.shape)
            # x = np.exp(logvar / 2.) * epsilon + mu
            self.writer.add_figure(
                'prediction/' + str(epoch) + '/' + str(batch_idx),
                make_figure(mu))

    log = {
        'loss': total_loss / len(self.data_loader),
        'metrics': (total_metrics / len(self.data_loader)).tolist()
    }

    if self.do_validation:
        val_log = self._valid_epoch(epoch)
        log = {**log, **val_log}

    if self.lr_scheduler is not None:
        self.lr_scheduler.step()

    return log

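# `make_figure` here (and in the test script above) is defined elsewhere in
# the project. A plausible minimal sketch, assuming it takes a (time, 3)
# array holding one joint's x/y/z trajectory and returns a matplotlib Figure,
# which is what writer.add_figure and fig.savefig both require:
import matplotlib.pyplot as plt

def make_figure(traj):
    """Plot the x/y/z components of a (time, 3) trajectory on one figure."""
    fig, ax = plt.subplots()
    for c, label in enumerate(('x', 'y', 'z')):
        ax.plot(traj[:, c], label=label)
    ax.set_xlabel('time step')
    ax.legend()
    return fig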