import argparse


def command():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    args = parser.parse_args()
    argsPrint(args)  # project-local helper that prints the parsed arguments
    return args
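# A minimal, standalone sketch (not part of the original script) showing how a
# parser like the one above behaves: parse_args() is given an explicit,
# hypothetical argument list here instead of reading sys.argv.
import argparse

demo_parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
demo_parser.add_argument('--epochs', type=int, default=10)
demo_parser.add_argument('--lr', type=float, default=0.01)

demo_args = demo_parser.parse_args(['--epochs', '3', '--lr', '0.05'])
print(demo_args.epochs, demo_args.lr)  # -> 3 0.05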
import cv2
import numpy as np

# IMG, stackImages, titleInsert, getFilePath, argsPrint and command are
# project-local helpers that are not part of this excerpt.


# NOTE: the original def line of concat3Images() is not included in this
# excerpt; the signature below is inferred from the call in main() and from
# the names used in the body, and the text default is only a placeholder.
def concat3Images(imgs, start_pos, img_width, ch, rate, text=('', '', '')):
    # Crop every image to the smallest common height and the requested width
    height = np.min([i.shape[0] for i in imgs])
    end_pos = start_pos + img_width
    if ch == 1:
        imgs = [i[:height, start_pos:end_pos] for i in imgs]
    else:
        imgs = [i[:height, start_pos:end_pos, :] for i in imgs]

    # Resize, add a title header to each image, and stack them side by side
    imgs = [IMG.resize(i, rate) for i in imgs]
    header_size = (30, int(img_width * rate), 3)
    imgs = [titleInsert(i, t, header_size) for i, t in zip(imgs, text)]
    return stackImages(imgs, thick=1, color=(0, 0, 0))


def main(args):
    ch = IMG.getCh(args.channel)
    imgs = [cv2.imread(name, ch) for name in args.image]
    # text = ['[hitotsume]', '[futatsume]', '[mittsume]']
    img = concat3Images(imgs, args.offset, args.img_width,
                        args.channel, args.img_rate)
    cv2.imshow('test', img)
    cv2.waitKey()
    cv2.imwrite(getFilePath(args.out_path, 'concat', '.jpg'), img)


if __name__ == '__main__':
    args = command()
    argsPrint(args)
    main(args)
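# stackImages() and titleInsert() above are project-local helpers whose code is
# not shown here. As a rough, hypothetical illustration of the kind of output
# stackImages(imgs, thick=1, color=(0, 0, 0)) presumably produces, the sketch
# below joins equal-height, 3-channel images horizontally with a thin divider.
# hstack_with_separator is an assumed name, not the project's actual helper.
import numpy as np


def hstack_with_separator(imgs, thick=1, color=(0, 0, 0)):
    # All images are assumed to share the same height and to be 3-channel
    sep = np.full((imgs[0].shape[0], thick, 3), color, dtype=np.uint8)
    parts = []
    for i, img in enumerate(imgs):
        parts.append(img)
        if i < len(imgs) - 1:
            parts.append(sep)  # thin colored divider between neighbours
    return np.hstack(parts)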
import cv2
import numpy as np

# IMG, F, makeBorder, stackImgAndShape and command are project-local helpers
# that are not part of this excerpt.


def main(args):
    # Read the input images
    imgs = [cv2.imread(name) for name in args.jpeg if IMG.isImgPath(name)]
    # Make all heights equal so the images can be concatenated
    h = np.max([img.shape[0] for img in imgs])
    imgs = [IMG.resize(img, h / img.shape[0]) for img in imgs]
    # Make all widths equal so the images can be concatenated
    flg = cv2.BORDER_REFLECT_101
    w = np.max([img.shape[1] for img in imgs])
    imgs = [makeBorder(img, 0, 0, 0, w - img.shape[1], flg) for img in imgs]
    # Add a black border to each image
    flg = cv2.BORDER_CONSTANT
    lw = args.line_width
    imgs = [makeBorder(img, 0, lw, 0, lw, flg, (0, 0, 0)) for img in imgs]
    # Get the image list and the row/column layout used for stacking
    imgs, size = stackImgAndShape(imgs, args.row)
    # Concatenate the images and resize the result
    buf = [np.vstack(imgs[s]) for s in size]
    img = IMG.resize(np.hstack(buf), args.resize)
    # Save the concatenated image
    name = F.getFilePath(args.out_path, 'concat', '.jpg')
    print('save:', name)
    cv2.imwrite(name, img)


if __name__ == '__main__':
    args = command()
    F.argsPrint(args)
    main(args)
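# makeBorder() used in main() above is a project-local helper whose code is not
# part of this excerpt. Judging from how it is called, it is presumably a thin
# wrapper around cv2.copyMakeBorder; the sketch below is an assumption, not the
# project's actual implementation.
import cv2


def makeBorder(img, top, bottom, left, right, flag, color=(0, 0, 0)):
    # top/bottom/left/right give the border width in pixels on each side;
    # color is only used with cv2.BORDER_CONSTANT and ignored otherwise
    return cv2.copyMakeBorder(img, top, bottom, left, right, flag, value=color)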