from toolz import first, last


def has_next_day(dates_dict, year, month, day):
    """Return the next day found in nested dates_dict, or None if there is none."""
    # Check the current month for later days
    days = sorted(dates_dict[year][month].keys())
    if day != last(days):
        di = days.index(day)
        next_day = days[di + 1]
        return {"year": year, "month": month, "day": next_day}

    # Check the current year for later months
    months = sorted(dates_dict[year].keys())
    if month != last(months):
        mi = months.index(month)
        next_month = months[mi + 1]
        next_day = first(sorted(dates_dict[year][next_month].keys()))
        return {"year": year, "month": next_month, "day": next_day}

    # Check for later years
    years = sorted(dates_dict.keys())
    if year != last(years):
        yi = years.index(year)
        next_year = years[yi + 1]
        next_month = first(sorted(dates_dict[next_year].keys()))
        next_day = first(sorted(dates_dict[next_year][next_month].keys()))
        return {"year": next_year, "month": next_month, "day": next_day}

    return None
def has_previous_day(dates_dict, year, month, day):
    """Return the previous day found in nested dates_dict, or None if there is none."""
    # Check the current month for earlier days
    days = sorted(dates_dict[year][month].keys())
    if day != first(days):
        di = days.index(day)
        prev_day = days[di - 1]
        return {"year": year, "month": month, "day": prev_day}

    # Check the current year for earlier months
    months = sorted(dates_dict[year].keys())
    if month != first(months):
        mi = months.index(month)
        prev_month = months[mi - 1]
        last_day = last(sorted(dates_dict[year][prev_month].keys()))
        return {"year": year, "month": prev_month, "day": last_day}

    # Check earlier years
    years = sorted(dates_dict.keys())
    if year != first(years):
        yi = years.index(year)
        prev_year = years[yi - 1]
        prev_month = last(sorted(dates_dict[prev_year].keys()))
        last_day = last(sorted(dates_dict[prev_year][prev_month].keys()))
        return {"year": prev_year, "month": prev_month, "day": last_day}

    return None
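# A minimal usage sketch for the two helpers above, assuming (as the code
# implies) a nested mapping of year -> month -> day -> entries. The sample
# dict and its values are hypothetical.
dates = {2023: {12: {30: [], 31: []}}, 2024: {1: {1: []}}}

assert has_next_day(dates, 2023, 12, 30) == {"year": 2023, "month": 12, "day": 31}
# Rolls over from the last day of 2023 to the first day of 2024.
assert has_next_day(dates, 2023, 12, 31) == {"year": 2024, "month": 1, "day": 1}
assert has_previous_day(dates, 2024, 1, 1) == {"year": 2023, "month": 12, "day": 31}
assert has_next_day(dates, 2024, 1, 1) is None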
import json

from toolz import first, last


def entrypoint(dockerfile):
    """Return the entrypoint, if declared."""
    # Scan the lines bottom-up so the last ENTRYPOINT wins, matching Docker semantics.
    lines = dockerfile.split("\n")[::-1]
    try:
        entry_line = first(filter(lambda x: "ENTRYPOINT" in x, lines))
    except StopIteration:
        # No ENTRYPOINT line was found
        return None
    res = last(entry_line.partition("ENTRYPOINT")).strip()
    try:
        # Exec form: ENTRYPOINT ["executable", "arg"]
        return json.loads(res)
    except ValueError:
        # Shell form: ENTRYPOINT executable arg
        return res.split()
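# Usage sketch for entrypoint(); the Dockerfile strings are hypothetical.
exec_form = 'FROM alpine\nENTRYPOINT ["/bin/app", "--serve"]'
shell_form = 'FROM alpine\nENTRYPOINT /bin/app --serve'

assert entrypoint(exec_form) == ["/bin/app", "--serve"]   # JSON (exec) form
assert entrypoint(shell_form) == ["/bin/app", "--serve"]  # shell form, split on whitespace
assert entrypoint('FROM alpine\nCMD ["/bin/app"]') is None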
from functools import reduce

from toolz import first, last


def __new__(cls, *parts):
    if len(parts) > 2:
        # Right-fold the parts into nested cons pairs.
        res = reduce(lambda x, y: ConsPair(y, x), reversed(parts))
    elif len(parts) == 2:
        car_part = first(parts)
        cdr_part = last(parts)
        try:
            # cons_merge is assumed to be defined elsewhere in the module.
            res = cons_merge(car_part, cdr_part)
        except NotImplementedError:
            instance = super(ConsPair, cls).__new__(cls)
            instance.car = car_part
            instance.cdr = cdr_part
            res = instance
    else:
        raise ValueError('Number of arguments must be at least 2.')
    return res
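# A hedged sketch of ConsPair construction. The surrounding ConsPair class and
# cons_merge() are not shown above; this example assumes cons_merge raises
# NotImplementedError for plain Python values, which routes construction into
# the raw car/cdr fallback branch.
pair = ConsPair(1, 2)
assert (pair.car, pair.cdr) == (1, 2)

# Three or more arguments right-fold into nested pairs: (1 . (2 . 3)).
nested = ConsPair(1, 2, 3)
assert nested.car == 1 and nested.cdr.car == 2 and nested.cdr.cdr == 3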
def inference(self, model_inputs):
    """
    Internal inference method
    :param model_inputs: transformed model input data
    :return: list of inference output in NDArray
    """
    with torch.no_grad():
        # The model returns intermediate outputs; keep only the final one.
        heatmaps = toolz.last(
            self.model(model_inputs['panorama']['scaled']))
        gaussians = []
        hh, hw = heatmaps.shape[2:]
        # Normalize each heatmap into a spatial probability distribution.
        for h in heatmaps.squeeze(0):
            gaussians.append(torch.softmax(h.view(-1), dim=0).view(hh, hw))
        gaussians = torch.stack(gaussians).unsqueeze(0)
        coords = self.com(self.grid(model_inputs['panorama']['scaled']),
                          gaussians)
        self.cuboid.floor_distance = model_inputs['floor_distance']
        coords = self.cuboid(coords)
        return toolz.merge({
            'coords': coords.squeeze(),
        }, model_inputs)
from operator import add

from pyrsistent.typing import PVector
from toolz import accumulate, concat, last


def sumDigits(ints: PVector[int]) -> int:
    # toDigits is assumed to split an int into its decimal digits (defined elsewhere).
    # Flatten every number into its digits, then take the final running sum.
    return last(accumulate(add, concat(map(lambda c: toDigits(c), ints))))
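# A quick check of sumDigits(). toDigits() is not shown above, so a minimal
# hypothetical stand-in is defined here just for the example.
def toDigits(n: int):
    return [int(d) for d in str(n)]  # 123 -> [1, 2, 3]

from pyrsistent import pvector

assert sumDigits(pvector([12, 34])) == 10  # 1 + 2 + 3 + 4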
# Module-level imports assumed by this method: numpy as np, torch, toolz,
# matplotlib (with matplotlib.pyplot as plt), shutil, warnings, hdf5storage,
# and sklearn.metrics' precision_recall_curve / auc; batch_transform,
# check_match, and smooth_pr are helpers defined elsewhere in the project.
def _evaluate_epoch_impl(self, data_loader, args):
    if args.log.grad_histogram != 'none':
        # Log histograms of the first ("top") and last ("bottom") parameter
        # tensors of each sub-network, plus their gradients when available.
        for name, net in self.fdn.named_nets().items():
            param = toolz.first(net.parameters())
            self.log_writer.add_histogram(name + "/top",
                                          param.data.cpu().numpy(),
                                          self.global_step)
            if param.grad is not None:
                self.log_writer.add_histogram(name + "/top/grad",
                                              param.grad.data.cpu().numpy(),
                                              self.global_step)
            param = toolz.last(net.parameters())
            self.log_writer.add_histogram(name + "/bottom",
                                          param.data.cpu().numpy(),
                                          self.global_step)
            if param.grad is not None:
                self.log_writer.add_histogram(name + "/bottom/grad",
                                              param.grad.data.cpu().numpy(),
                                              self.global_step)

    # Accumulators are lists when the corresponding log target is enabled,
    # otherwise None so the loop below can skip the work.
    if args.log.correlation != 'none' or args.log.pr != 'none' or args.log.viz_2d != 'none':
        code_s1_all = []
        code_s2_all = []
        code_a1_all = []
        code_a2_all = []
    else:
        code_s1_all = None
        code_s2_all = None
        code_a1_all = None
        code_a2_all = None

    if args.log.viz_2d != 'none':
        domain1_all = []
        domain2_all = []
    else:
        domain1_all = None
        domain2_all = None

    if args.log.viz_2d != 'none' or args.log.pr_match != 'none':
        label1_all = []
        label2_all = []
    else:
        label1_all = None
        label2_all = None

    if args.log.feature != 'none':
        data1_all = []
        data2_all = []
    else:
        data1_all = None
        data2_all = None

    if args.log.transform_image != 'none':
        # One slot per ordered pair of domains.
        ndom = data_loader.dataset.num_domain()
        transform_images = [[None for _ in range(ndom)] for _ in range(ndom)]
    else:
        transform_images = None

    for batch_idx, (data, target) in enumerate(data_loader):
        if (batch_idx + 1) > args.eval_batch:
            break
        print("\rEvaluating: %d/%d..." % (batch_idx + 1, len(data_loader)),
              end='')
        data1 = data[0].to(args.device)
        data2 = data[1].to(args.device)
        target1 = target[0]
        target2 = target[1]
        domain1 = target1[0]
        domain2 = target2[0]
        label1 = target1[1]
        label2 = target2[1]

        # Forward
        if code_s1_all is not None:
            code_s1, code_a1 = self.fdn.encode(data1)
            code_s2, code_a2 = self.fdn.encode(data2)
        else:
            code_s1 = code_a1 = code_s2 = code_a2 = None

        # Append outputs to the enabled accumulators
        if code_s1_all is not None:
            code_s1_all.append(code_s1.cpu().numpy())
            code_s2_all.append(code_s2.cpu().numpy())
        if code_a1_all is not None:
            code_a1_all.append(code_a1.cpu().numpy())
            code_a2_all.append(code_a2.cpu().numpy())
        if domain1_all is not None:
            domain1_all.append(domain1.cpu().numpy())
            domain2_all.append(domain2.cpu().numpy())
        if label1_all is not None:
            if isinstance(label1, torch.Tensor):
                label1_all.append(label1.cpu().numpy())
                label2_all.append(label2.cpu().numpy())
            elif isinstance(label1, list):
                label1_all += label1
                label2_all += label2
            else:
                raise NotImplementedError
        if data1_all is not None:
            data1_all.append(data1.cpu().numpy())
            data2_all.append(data2.cpu().numpy())

        # Build one example transform image per ordered domain pair
        if args.log.transform_image != 'none':
            dom1 = domain1[0].item()
            dom2 = domain2[0].item()
            if transform_images[dom1][dom2] is None or transform_images[dom2][dom1] is None:
                if transform_images[dom1][dom2] is None:
                    data1_less = data1[:1]
                    data2_less = data2[-1:]
                else:
                    data1_less = data2[:1]
                    data2_less = data1[-1:]
                    dom1, dom2 = dom2, dom1
                code_s1, code_a1 = self.fdn.encode(data1_less)
                code_s2, code_a2 = self.fdn.encode(data2_less)
                code_a0 = torch.zeros_like(code_a1, device=code_a1.device)
                trans_img = self.fdn.decode(code_s1, code_a2)
                zero_app_img = self.fdn.decode(code_s1, code_a0)
                # 2x2 grid: the two inputs on top, transferred and
                # zero-appearance reconstructions below.
                transform_images[dom1][dom2] = torch.cat(
                    (torch.cat((data1_less, data2_less), dim=3),
                     torch.cat((trans_img, zero_app_img), dim=3)),
                    dim=2)
                transform_images[dom1][dom2] = batch_transform(
                    transform_images[dom1][dom2], args.inv_transform)
                transform_images[dom1][dom2] = transform_images[dom1][
                    dom2].squeeze(dim=0).permute([1, 2, 0]).cpu().numpy()
    print('done.')

    # Stack per-batch results into single arrays.
    if args.log.correlation != 'none' or args.log.pr != 'none' or args.log.viz_2d != 'none':
        code_s1_all = np.concatenate(code_s1_all)
        code_s2_all = np.concatenate(code_s2_all)
        code_a1_all = np.concatenate(code_a1_all)
        code_a2_all = np.concatenate(code_a2_all)
    if args.log.viz_2d != 'none' or args.log.feature != 'none':
        label1_all = np.concatenate(label1_all)
        label2_all = np.concatenate(label2_all)
    if args.log.feature != 'none':
        domain1_all = np.concatenate(domain1_all)
        domain2_all = np.concatenate(domain2_all)
        data1_all = np.concatenate(data1_all)
        data2_all = np.concatenate(data2_all)

    def compute_pr(code1_all, code2_all):
        # Cosine-similarity matching between the two code sets, followed by a
        # precision-recall curve over the best-match scores.
        nc = code1_all.shape[0]
        code1_all_flatten = code1_all.reshape([code1_all.shape[0], -1]).copy()
        code2_all_flatten = code2_all.reshape([code2_all.shape[0], -1]).copy()
        code1_all_flatten /= np.linalg.norm(code1_all_flatten, axis=1, keepdims=True)
        code2_all_flatten /= np.linalg.norm(code2_all_flatten, axis=1, keepdims=True)
        _scores = np.matmul(code1_all_flatten, code2_all_flatten.transpose())
        _mscore = np.max(_scores, axis=0)
        _pick = np.argmax(_scores, axis=0)
        # Note: args.place_threshold is doubled because check_match() divides it by 2.
        _correct = [
            1 if check_match(_pick[_i], _i, args.place_threshold * 2) else 0
            for _i in range(0, nc)
        ]
        _correctness = np.count_nonzero(_correct)
        _precision, _recall, _threshold = precision_recall_curve(_correct, _mscore)
        _precision, _recall = smooth_pr(_precision, _recall)
        _curr_auc = auc(_recall, _precision)
        return (_precision, _recall, _curr_auc, _scores, _mscore, _pick,
                _correctness, _correct)

    if args.log.pr != 'none':
        precision, recall, curr_auc, scores, mscore, pick, correctness, correct = compute_pr(
            code_s1_all, code_s2_all)
    elif args.log.pr_match != 'none':
        # The pr_match block below also needs mscore and correct, so capture them here.
        _, _, _, _, mscore, pick, _, correct = compute_pr(code_s1_all, code_s2_all)

    if args.log.pr != 'none':
        n_all = pick.shape[0]
        accuracy = correctness / n_all
        if hasattr(args, "usetex") and args.usetex:
            matplotlib.rcParams['text.usetex'] = True
        plt.figure(1)
        plt.clf()
        plt.plot(recall, precision, '-.', label='AUC=%.2f' % curr_auc, linewidth=2)
        plt.title('PR')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.1])
        self.log_writer.add_figure("PR_smoothed", plt.gcf(), self.global_step,
                                   dest=args.log.pr)
        plt.figure(2)
        plt.clf()
        plt.imshow(scores, cmap='jet')
        plt.gca().xaxis.set_ticks_position('top')
        plt.colorbar()
        self.log_writer.add_figure("similarity_matrix", plt.gcf(),
                                   self.global_step, dest=args.log.pr)
        _, _, _, scores_a, _, pick_a, correctness_a, _ = compute_pr(
            code_a1_all, code_a2_all)
        plt.figure(3)
        plt.clf()
        plt.imshow(scores_a, cmap='jet')
        plt.gca().xaxis.set_ticks_position('top')
        plt.colorbar()
        self.log_writer.add_figure("similarity_matrix_of_a", plt.gcf(),
                                   self.global_step, dest=args.log.pr)
        self.log_writer.add_pr_curve("PR", correct, mscore, self.global_step)
        self.log_writer.add_scalar("AUC", curr_auc, self.global_step)
        self.log_writer.add_scalar("accuracy", accuracy, self.global_step)
        print("AUC=%.3f" % curr_auc)
        print("Accuracy=%d/%d=%.3f" % (correctness, n_all, accuracy))

    if args.log.pr_match != 'none':
        N2 = len(data_loader.dataset.datasets[1])
        pick_m = pick[0:N2]
        mscore_m = mscore[0:N2]
        print(len(label1_all))
        label1_all_m = [label1_all[p] for p in pick_m]
        label2_all_m = label2_all[0:N2]
        label_all = [
            '%s %s %1.4f' % (label2, label1, score)
            for label1, label2, score in zip(label1_all_m, label2_all_m, mscore_m)
        ]
        self.log_writer.add_scalars("pick/%s" % args.model_name,
                                    dict(zip(label_all, pick.astype(int))),
                                    self.global_step, dest=args.log.pr_match)
        self.log_writer.add_scalars("correct/%s" % args.model_name,
                                    dict(zip(label_all, correct)),
                                    self.global_step, dest=args.log.pr_match)
        db_dir = self.log_writer.get_dir("pick_db/%s" % args.model_name)
        q_dir = self.log_writer.get_dir("pick_q/%s" % args.model_name)
        for i in range(N2):
            label1, label2, _ = label_all[i].split(' ')
            shutil.copyfile(label1, "%s/%05d.png" % (q_dir, i))
            shutil.copyfile(label2, "%s/%05d.png" % (db_dir, i))

    if args.log.viz_2d != 'none':
        # Pairwise 2-D scatter plots of ground-truth factors against learned codes.
        real_s1 = label1_all[:, 0:len(args.mean_s)]
        real_a1 = label1_all[:, -len(args.mean_a1):]
        real_s2 = label2_all[:, 0:len(args.mean_s)]
        real_a2 = label2_all[:, -len(args.mean_a2):]
        feat_s1 = code_s1_all.reshape([code_s1_all.shape[0], -1])
        feat_a1 = code_a1_all.reshape([code_a1_all.shape[0], -1])
        feat_s2 = code_s2_all.reshape([code_s2_all.shape[0], -1])
        feat_a2 = code_a2_all.reshape([code_a2_all.shape[0], -1])
        x1 = np.concatenate((real_s1, real_a1, feat_s1, feat_a1), axis=1)
        x2 = np.concatenate((real_s2, real_a2, feat_s2, feat_a2), axis=1)
        xylim = [
            -1.0 + min(np.min(x1), np.min(x2)),
            1.0 + max(np.max(x1), np.max(x2))
        ]
        xymax = max(abs(xylim[0]), abs(xylim[1]))
        x_label = []
        x_label += ['$\\hat{s}_%d$' % i for i in range(real_s1.shape[1])]
        x_label += ['$\\hat{a}_%d$' % i for i in range(real_a1.shape[1])]
        x_label += ['$s_%d$' % i for i in range(feat_s1.shape[1])]
        x_label += ['$a_%d$' % i for i in range(feat_a1.shape[1])]
        ndim = x1.shape[1]
        plt.figure(4, figsize=(10, 10))
        plt.clf()
        if hasattr(args, "usetex") and args.usetex:
            plt.rc('text', usetex=True)
            plt.rc('font', family='serif')
        for ind1 in range(ndim):
            for ind2 in range(0, ind1 + 1):
                plt.subplot(ndim, ndim, ind1 * ndim + ind2 + 1)
                plt.plot(x1[:, ind2], x1[:, ind1], 'r.', markersize=2)
                plt.plot(x2[:, ind2], x2[:, ind1], 'g.', markersize=2)
                plt.plot([-xymax, +xymax], [-xymax, +xymax], '--',
                         color='black', linewidth=1.0, alpha=0.2)
                plt.plot([-xymax, +xymax], [+xymax, -xymax], '--',
                         color='black', linewidth=1.0, alpha=0.2)
                plt.xlim(xylim)
                plt.ylim(xylim)
        for ind in range(ndim):
            plt.subplot(ndim, ndim, (ndim - 1) * ndim + ind + 1)
            plt.xlabel(x_label[ind])
            plt.subplot(ndim, ndim, ind * ndim + 1)
            plt.ylabel(x_label[ind])
        self.log_writer.add_figure("viz_2d", plt.gcf(), self.global_step,
                                   dest=args.log.viz_2d)

    if args.log.correlation != 'none':
        def pearson_r(x, y):
            # Column-wise Pearson correlation between two code matrices.
            x = x - np.mean(x, axis=0, keepdims=True)
            y = y - np.mean(y, axis=0, keepdims=True)
            x_norm = np.expand_dims(np.linalg.norm(x, axis=0), axis=1)
            y_norm = np.expand_dims(np.linalg.norm(y, axis=0), axis=0)
            return np.matmul(np.transpose(x), y) / x_norm / y_norm

        cor1 = pearson_r(
            code_s1_all.copy().reshape([code_s1_all.shape[0], -1]),
            code_a1_all.copy().reshape([code_a1_all.shape[0], -1]))
        cor2 = pearson_r(
            code_s2_all.copy().reshape([code_s2_all.shape[0], -1]),
            code_a2_all.copy().reshape([code_a2_all.shape[0], -1]))
        plt.figure(5, figsize=(3.45, 3.45), clear=True)
        if hasattr(args, "usetex") and args.usetex:
            plt.rc('text', usetex=True)
            plt.rc('font', family='Times', size=10)
        plt.tight_layout()
        cmap_name = 'bwr'
        plt.subplot(1, 2, 1)
        plt.imshow(cor1, vmin=-1, vmax=1, cmap=plt.get_cmap(cmap_name))
        plt.colorbar(orientation="horizontal", pad=0.2)
        plt.xlabel('$a_1$')
        plt.ylabel('$s$')
        plt.subplot(1, 2, 2)
        plt.imshow(cor2, vmin=-1, vmax=1, cmap=plt.get_cmap(cmap_name))
        plt.colorbar(orientation="horizontal", pad=0.2)
        plt.xlabel('$a_2$')
        plt.ylabel('$s$')
        plt.subplots_adjust(wspace=0.4)
        self.log_writer.add_figure('correlation', plt.gcf(), self.global_step,
                                   dest=args.log.correlation)

    if args.log.feature != 'none':
        matfile_data = {
            'data1': data1_all,
            'data2': data2_all,
            'domain1': domain1_all,
            'domain2': domain2_all,
            'digit1': label1_all,
            'digit2': label2_all,
            'code_s1': code_s1_all,
            'code_s2': code_s2_all,
            'code_a1': code_a1_all,
            'code_a2': code_a2_all
        }
        hdf5storage.write(matfile_data, ".",
                          self.log_writer.get_dir(args.model_name) + "/features.mat",
                          matlab_compatible=True)

    if args.log.transform_image != 'none':
        plt.figure(6)
        plt.clf()
        ndom = data_loader.dataset.num_domain()
        for i in range(ndom):
            for j in range(ndom):
                if i == j:
                    continue
                if transform_images[i][j] is None:
                    warnings.warn("transform_images[%d][%d] is None" % (i, j))
                    continue
                plt.subplot(ndom, ndom, i * ndom + j + 1)
                plt.imshow(transform_images[i][j])
                plt.axis('off')
        self.log_writer.add_figure("transform_image", plt.gcf(),
                                   self.global_step,
                                   dest=args.log.transform_image)
from operator import add
from typing import List

from toolz import accumulate, last


def rlePropLengthPreserved(ints: List) -> bool:
    # Property check: the run lengths of an RLE encoding must sum to the input
    # length. runLengthEncode is assumed to yield (value, count) pairs.
    return len(ints) == last(
        accumulate(add, [b for a, b in runLengthEncode(ints)]))
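# A quick property check. runLengthEncode() is not shown above, so a minimal
# hypothetical encoder is sketched here purely for the example.
from itertools import groupby

def runLengthEncode(xs):
    return [(v, len(list(g))) for v, g in groupby(xs)]  # [1, 1, 2] -> [(1, 2), (2, 1)]

assert rlePropLengthPreserved([1, 1, 2, 3, 3, 3])  # run lengths 2 + 1 + 3 == 6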
# (Snippet begins mid-__init__ of StackedHourglass, inside the construction of
# a ModuleList; torch, torch.nn, and typing are assumed imported at module level.)
                    kernel_size=1,
                ),
            ) for i in range(stacks - 1)
        ])
        self.stacks = stacks

    def forward(
        self,
        image: torch.Tensor,
    ) -> typing.List[torch.Tensor]:
        x = self.pre(image)
        combined_hm_preds = []
        for i in range(self.stacks):
            hg = self.hgs[i](x)
            feature = self.features[i](hg)
            preds = self.outs[i](feature)
            combined_hm_preds.append(preds)
            if i < self.stacks - 1:
                # Intermediate supervision: feed merged predictions back into the next stack.
                x = x + self.merge_preds[i](preds) + self.merge_features[i](feature)
        return combined_hm_preds


if __name__ == '__main__':
    import toolz

    sh = StackedHourglass()
    CKPT_PATH = './ckpts/ssc.pth'
    state_dict = torch.load(CKPT_PATH)
    sh.load_state_dict(state_dict, strict=True)
    # toolz.last picks the final stack's heatmaps from the list of outputs.
    print(toolz.last(sh(torch.rand(5, 3, 256, 512))).shape)
def is_being_monitored(self) -> bool:
    if not self.occurrences:
        return False
    # Monitored unless the most recent occurrence has been resolved.
    return last(self.occurrences).state != State.RESOLVED
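# A hedged illustration of the rule above; State and the occurrence records are
# not shown in the snippet, so hypothetical stand-ins are used here.
import enum
from dataclasses import dataclass

from toolz import last


class State(enum.Enum):
    OPEN = enum.auto()
    RESOLVED = enum.auto()


@dataclass
class Occurrence:
    state: State


# Monitoring continues while the most recent occurrence is unresolved.
occurrences = [Occurrence(State.RESOLVED), Occurrence(State.OPEN)]
assert last(occurrences).state != State.RESOLVED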
import sys
from os import getcwd
from os.path import exists

import bs4
from toolz import first, last

print(
    "THIS DOES NOT NEED TO BE RUN BY THE END USER BUT HAS BEEN INCLUDED IN THE REPOSITORY \
LEST IT BE LOST TO THE SANDS OF TIME. Only run if you know what you are doing."
)

with open('./rates.xml', 'r') as rates:
    soup = bs4.BeautifulSoup(markup=rates.read(), features="xml")

hotels = soup.find_all('hotel')
sys.stdout.write("Found {0} hotel tags\n".format(len(hotels)))

if not exists("./hotel_xml_files"):
    raise SystemExit(
        "subfolder {0}/hotel_xml_files not found, nowhere to write to".format(
            getcwd()))

# Each item from enumerate() is an (index, soup) pair; first()/last() unpack it.
for hotel in enumerate(hotels):
    path = "./hotel_xml_files/hotel_{0}.xml".format(first(hotel))
    with open(path, 'w') as output:
        output.write("<!-- begin hotel {0} -->\n".format(first(hotel)))
        output.write(last(hotel).prettify())
        output.write("<!-- end hotel {0} -->\n".format(first(hotel)))

with open("./hotel_xml_files/README.txt", 'w') as readme:
    readme.write(
        "These files were generated by the hotels.py script in the directory above. "
        "This was done to make handling each hotel XML tree much more manageable on slower systems "
        "by splitting the original 18MB rates.xml file by each individual hotel tag. "
        "It was also necessary to format the individual <hotel> nodes specifically by properly "
        "indenting the child tags to make the still-large XML files much more readable for humans "
        "and thereby expedite development time. "
        "Two test hotel XML trees in particular were selected and moved to the parent directory, "
        "AAAAAA.xml and HNLADR.xml. "
        "hotels.py is a convenience script to aid in the process of developing the main script "
        "and will not be updated henceforth. ".replace(". ", "\n"))
import bisect
import itertools
import random

import toolz


def sample_from_counter(counter):
    # Draw a key from the counter, weighted by its count.
    choices, weights = zip(*counter.items())
    cumdist = list(itertools.accumulate(weights))
    # toolz.last(cumdist) is the total weight; roll a die in [0, total).
    dice_roll = random.randrange(toolz.last(cumdist))
    return choices[bisect.bisect(cumdist, dice_roll)]
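# Usage sketch: over many draws, sample frequencies should roughly track the
# counts stored in the Counter.
from collections import Counter

counter = Counter({'a': 8, 'b': 1, 'c': 1})
draws = Counter(sample_from_counter(counter) for _ in range(10000))
print(draws.most_common())  # 'a' should appear roughly 80% of the time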
def tail(self):
    """Return the final element (via toolz.last)."""
    return tz.last(self)
def __getitem__(self, address):
    # The item at an address is the final node on its path.
    return last(self.path(address))