def main(args):
    out_dir = os.path.join(args.out_dir,
                           args.dataset,
                           args.model,
                           args.preprocessing,
                           'rs_{}'.format(args.rs))

    # create output directory
    os.makedirs(out_dir, exist_ok=True)
    util.clear_dir(out_dir)

    # create logger
    logger = util.get_logger(os.path.join(out_dir, 'log.txt'))
    logger.info(args)
    logger.info('\ntimestamp: {}'.format(datetime.now()))

    # run experiment
    experiment(args, logger, out_dir)
def main(args):

    # define output directory
    out_dir = os.path.join(args.out_dir,
                           args.dataset,
                           args.model,
                           args.preprocessing,
                           args.surrogate,
                           args.metric,
                           'rs_{}'.format(args.rs))

    # create output directory and clear any previous contents
    os.makedirs(out_dir, exist_ok=True)
    util.clear_dir(out_dir)

    # create logger
    logger = util.get_logger(os.path.join(out_dir, 'log.txt'))
    logger.info(args)
    logger.info('\ntimestamp: {}'.format(datetime.now()))

    # run experiment
    experiment(args, out_dir, logger)
def main(args):

    # change name of dataset if using a subset of the dataset
    if 0.0 < args.train_frac < 1.0:
        dataset = '{}_{}'.format(args.dataset, str(args.train_frac).replace('.', 'p'))

    # leave name unchanged
    else:
        dataset = args.dataset

    # change preprocessing if using `feature_path` or `feature_output` with `cb`
    if args.model == 'cb' and ('feature_path' in args.method or 'feature_output' in args.method):
        args.preprocessing = 'standard'

    # Leaf Influence cannot handle categorical features
    elif args.method == 'leaf_influence':
        args.preprocessing = 'standard'

    # define output directory
    out_dir = os.path.join(args.out_dir,
                           dataset,
                           args.model,
                           args.preprocessing,
                           args.method,
                           'rs_{}'.format(args.rs))

    # create output directory and clear any previous contents
    os.makedirs(out_dir, exist_ok=True)
    util.clear_dir(out_dir)

    # create logger
    logger = util.get_logger(os.path.join(out_dir, 'log.txt'))
    logger.info(args)
    logger.info('\ntimestamp: {}'.format(datetime.now()))

    # run experiment
    experiment(args, logger, out_dir)
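# Illustrative sketch (an assumption, not taken from the original project): a minimal
# argparse entry point that could drive the main() above. The argument names mirror the
# attributes the function reads (out_dir, dataset, model, preprocessing, method,
# train_frac, rs); the default values are hypothetical placeholders.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--out_dir', type=str, default='output/', help='base results directory')
    parser.add_argument('--dataset', type=str, default='example_dataset', help='dataset name (hypothetical default)')
    parser.add_argument('--model', type=str, default='cb', help='model identifier, e.g. cb')
    parser.add_argument('--preprocessing', type=str, default='default', help='feature preprocessing scheme')
    parser.add_argument('--method', type=str, default='random', help='influence/explanation method (hypothetical default)')
    parser.add_argument('--train_frac', type=float, default=1.0, help='fraction of the training set to use')
    parser.add_argument('--rs', type=int, default=1, help='random seed')
    main(parser.parse_args())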
def prepare_directories(self):
    self.path = os.path.join(self.root_path, self.experiment['path'])
    self.protos_path = os.path.join(self.path, 'protos')
    self.solver_path = os.path.join(self.protos_path, 'solver.prototxt')
    self.model_path = os.path.join(self.protos_path, 'train_val.prototxt')
    self.logs_path = os.path.join(self.path, 'logs')
    self.scripts_path = os.path.join(self.path, 'scripts')

    if self.replace_mode == 1 and os.path.exists(self.protos_path):
        self.prepare_protos()

    if self.replace_mode in [0, 1] and os.path.exists(self.path):
        self.snapshots_path = self.get_snapshots_directory()
        return

    for d in [self.protos_path, self.logs_path, self.scripts_path]:
        clear_dir(d)

    self.prepare_protos()
    self.prepare_scripts()
    self.snapshots_path = self.get_snapshots_directory()
    clear_dir(self.snapshots_path)
def main(args):

    # define output directory
    out_dir = os.path.join(args.out_dir,
                           args.dataset,
                           args.model,
                           args.preprocessing,
                           'rs_{}'.format(args.rs))

    # create output directory and clear any previous contents
    os.makedirs(out_dir, exist_ok=True)
    util.clear_dir(out_dir)

    # create logger
    logger = util.get_logger(os.path.join(out_dir, 'log.txt'))
    logger.info(args)
    logger.info('\ntimestamp: {}'.format(datetime.now()))

    # write everything printed to stdout/stderr to this log file
    logfile, stdout, stderr = util.stdout_stderr_to_log(os.path.join(out_dir, 'log+.txt'))

    # run experiment
    experiment(args, logger, out_dir, seed=args.rs)

    # restore original stdout and stderr settings
    util.reset_stdout_stderr(logfile, stdout, stderr)
def main(args):

    for i in range(args.n_repeats):

        # define output directory
        out_dir = os.path.join(args.out_dir,
                               args.dataset,
                               args.model,
                               'flip_{}'.format(args.flip_frac),
                               args.method,
                               'rs_{}'.format(args.rs))

        # create output directory and clear any previous contents
        os.makedirs(out_dir, exist_ok=True)
        util.clear_dir(out_dir)

        # create logger
        logger = util.get_logger(os.path.join(out_dir, 'log.txt'))
        logger.info(args)
        logger.info('\ntimestamp: {}'.format(datetime.now()))

        # run experiment
        experiment(args, logger, out_dir)
        util.remove_logger(logger)

        args.rs += 1
def split_parts_for_image(start_y, preproces_path, out_dir, dir_name, original_dir, collectData, splitMode):
    try:
        # segmented image produced in step three
        segmented_img = cv2.imread(get_seg_file_name(preproces_path))

        # original image
        original_img = cv2.imread(get_specified_dir(original_dir, dir_name + ".jpg"))

        # width of the original image
        original_width = original_img.shape[1]
        original_resize = original_width / 1000

        width = segmented_img.shape[1]
        height = segmented_img.shape[0]

        # area thresholds for accepting a connected component
        min_area = 50
        max_area = width * height

        # BGR => grayscale mask of non-zero pixels
        mask = cv2.cvtColor((segmented_img != 0).astype(np.uint8), cv2.COLOR_BGR2GRAY)

        # extracted part images
        partImages = []

        # counters for parts discarded at the left/right image edges
        part_remove_left = 0
        part_remove_right = 0

        while True:
            # flatten the mask to 1-D and get the indices of its non-zero elements
            nz = np.nonzero(mask.flatten())[0].flatten()
            if len(nz) == 0:
                break

            nz_i = 0
            found_mask = None
            found_image = None

            while True:
                index = nz[nz_i]
                seed_x = index % width
                # floor division to recover the row index
                seed_y = index // width

                ff_mask = np.zeros((height + 2, width + 2), dtype=np.uint8)
                area, _, __, rect = cv2.floodFill(mask, ff_mask, (seed_x, seed_y), 255,
                                                  flags=cv2.FLOODFILL_MASK_ONLY | cv2.FLOODFILL_FIXED_RANGE)
                x = rect[0]
                y = rect[1]
                w = rect[2]
                h = rect[3]

                # slicing into found rect
                roi_mask = ff_mask[y + 1:y + 1 + h, x + 1:x + 1 + w]

                found = False
                if min_area < area < max_area:
                    found_mask = roi_mask
                    newX = x * 4
                    newY = (y * 4) + start_y
                    newW = w * 4
                    newH = h * 4

                    # in "full" mode, discard parts touching the left or right image boundary
                    if splitMode == "full":
                        if newX <= 0:
                            found_mask = None
                            part_remove_left += 1
                        if newW + newX >= original_width:
                            found_mask = None
                            part_remove_right += 1

                    startX = newX - 30
                    endX = newX + newW + 30
                    startY = newY - 30
                    endY = newY + newH + 30
                    found_image = original_img[startY:endY, startX:endX].copy()
                    found = True

                # clearing found component in the mask
                mask[y:y + h, x:x + w][roi_mask != 0] = 0

                if found:
                    break

                nz_i += 1
                if nz_i >= len(nz):
                    break

            if found_mask is not None:
                partImages.append(found_image)

        # if there are multiple parts, create a directory to save them
        # hasmorepart = len(partImages) > 1
        # if hasmorepart:
        out_dir = get_specified_dir(out_dir, dir_name)
        if os.path.exists(out_dir):
            clear_dir(out_dir)
        else:
            create_dir(out_dir)

        # record how many parts were discarded at the image edges
        if part_remove_left or part_remove_right:
            fl = open(out_dir + "/data.json", 'w')
            r_data = {
                "left_remove": part_remove_left,
                "right_remove": part_remove_right
            }
            fl.write(json.dumps(r_data, ensure_ascii=False, indent=2))
            fl.close()

        # write out each extracted part image
        part_index = 0
        for part in partImages:
            title = os.path.splitext(os.path.split(preproces_path)[1])[0]
            file_name = os.path.join("", "%s_%02d.png" % (title, part_index))
            # if hasmorepart:
            out_file = os.path.join(out_dir, "%s_%02d.png" % (title, part_index))
            # else:
            #     out_file = os.path.join(out_dir, "%s.png" % (title))
            cv2.imwrite(out_file, part)

            # optional data collection for each part
            if collectData:
                color_thief = ColorThief(out_file)
                dominant_color = color_thief.get_color(quality=1)
                h, w = part.shape[:2]
                datas = {
                    "name": file_name,
                    "w": w,
                    "h": h,
                    "area": w * h,
                    "rgb": dominant_color
                }
                filePath = out_dir + "/data.json"
                if os.path.exists(filePath):
                    fl = open(filePath, 'a')
                else:
                    fl = open(filePath, 'w')
                fl.write(json.dumps(datas, ensure_ascii=False, indent=2))
                fl.close()

            part_index += 1

        return dir_name + ".png", True

    except Exception as _:
        return dir_name + ".png", False
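# Illustrative usage sketch (assumed file layout, not from the original source): the
# directory and file names below are hypothetical. split_parts_for_image() returns the
# expected output file name together with a success flag, so a caller can collect failures.
if __name__ == '__main__':
    name, ok = split_parts_for_image(start_y=0,
                                     preproces_path='work/segmented/sample_001.png',
                                     out_dir='work/parts',
                                     dir_name='sample_001',
                                     original_dir='work/original',
                                     collectData=True,
                                     splitMode='full')
    print(name, 'ok' if ok else 'failed')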