def test(self, args):
    """Run the trained model over the test set and dump one output per file.

    Depending on ``args.out_type`` the dumped tensor is either the class
    posteriors (``'class'``, stored as clipped log-probabilities) or the
    fc6 features (``'feature'``). Outputs are written into ``args.test_dir``
    under each test file's basename via ``dataio.save_data``.
    """
    self.sess.run(tf.global_variables_initializer())
    # Test-only dataio: the four train/dev slots are unused.
    self.dataio = dataio(None, None, None, None, args.test_data)

    # Restore weights; like the original, we proceed even on failure.
    if self.load(args.checkpoint_dir):
        print("successfully loaded")
    else:
        print("failed to load")

    if args.out_type == 'class':
        target = self.prediction
    elif args.out_type == 'feature':
        target = self.fc6
    else:
        raise NotImplementedError

    for frame_idx in range(self.dataio.test_frames):
        src_name = self.dataio.test_names[frame_idx]
        print(src_name)
        sys.stdout.flush()
        feed = {
            self.data: self.dataio.test_data[frame_idx],
            self.keep_prob: 1.0,  # no dropout at inference time
        }
        output = self.sess.run(target, feed_dict=feed)
        if args.out_type == 'class':
            # Floor posteriors at exp(-100) so log() never sees zero.
            output = np.log(output + np.exp(-100))
        dst_name = os.path.join(args.test_dir, os.path.basename(src_name))
        self.dataio.save_data(dst_name, output)
def train(self, args):
    """Train the network, decaying the learning rate when dev accuracy drops.

    Runs ``args.epoch`` epochs of mini-batch SGD via ``self.optim``,
    periodically printing train loss/accuracy and checkpointing.
    """
    # Optionally resume from a checkpoint; training proceeds either way.
    if args.retrain and self.load(args.checkpoint_dir):
        print("successfully loaded")
    else:
        print("failed to load")
    sys.stdout.flush()
    counter = 0  # global step across all epochs
    start_time = time.time()
    self.dataio = dataio(args.train_genuine, args.train_spoof,
                         args.dev_genuine, args.dev_spoof,
                         batch_size=self.batch_size)
    n_epoch = args.epoch
    # Number of full mini-batches per epoch (remainder frames are dropped).
    batch_idxs = self.dataio.frames // self.batch_size
    lr = args.lr
    # Baseline dev accuracy before any training, used for the decay test.
    previous_acc = self.dev_acc(0)
    # NOTE(review): early_stop is counted below but never used to stop
    # training inside this method — confirm whether a break was intended.
    early_stop = 0
    for epoch in range(n_epoch):
        self.dataio.shuffle()
        for idx in range(0, batch_idxs):
            batch_x, batch_y = self.dataio.batch()
            feed_dict = {
                self.data: batch_x,
                self.y: batch_y,
                self.lr: lr,
                self.keep_prob: args.keep_prob
            }
            _ = self.sess.run(self.optim, feed_dict=feed_dict)
            if counter % args.print_freq == 0:
                # Re-evaluate on the same batch with dropout disabled.
                feed_dict[self.keep_prob] = 1.0
                loss, acc = self.sess.run(
                    [self.cross_entropy, self.accuracy],
                    feed_dict=feed_dict)
                print("Epoch: [%2d %4d/%4d], loss: [%.6f], acc: [%.2f%%]"
                      % (epoch + 1, idx, batch_idxs, loss, acc * 100.0))
                sys.stdout.flush()
            # Checkpoint every save_freq steps (offset by 2).
            if np.mod(counter, args.save_freq) == 2:
                self.save(args.checkpoint_dir, counter)
            counter += 1
        acc = self.dev_acc(epoch + 1)
        if previous_acc > acc:
            # Dev accuracy got worse: decay the learning rate by args.dlr.
            early_stop += 1
            lr = lr * args.dlr
            print('set learning rate: %.12f' % lr)
            sys.stdout.flush()
        else:
            early_stop = 0
        previous_acc = acc
def do_forward(dfile, ofile):
    """Forward one data file through the network and save the result.

    ``dfile`` is the input feature file and ``ofile`` the output file,
    written mini-batch by mini-batch with the module-level ``save``
    function (binary or text). Batches are moved to/from the GPU when
    ``gpu`` is set.
    """
    # NOTE(review): the sibling scripts call dataio.dataio(file, form, dim);
    # here the list file args['<file>'] is passed as the data file and
    # dfile as the format — confirm this argument order is intended.
    # NOTE(review): elsewhere the input dim is model.l_0.W.data.shape[1];
    # here it is model.l_0.W.shape[1] — confirm which attribute is right.
    data = dataio.dataio(args['<file>'], dfile,
                         model.l_0.W.shape[1]).astype(np.float32)
    ndata = data.shape[0]
    # FIX: 'with' guarantees the output handle is closed even if forward()
    # raises; the original open()/close() pair leaked it on error.
    with open(ofile, 'wb') as f:
        for i in range(0, ndata, mbsize):
            x_batch = data[i:i + mbsize]
            if gpu:
                x_batch = cuda.to_gpu(x_batch)
            y = forward(x_batch).data
            if gpu:
                y = cuda.to_cpu(y)
            if be:
                # Saving big-endian: swap bytes in place. FIX: the original
                # called y.byteswap(be) unconditionally, which for be=False
                # built a byte-swapped copy and threw it away.
                y.byteswap(True)
            save(y, f)


# The list file holds "input output" pairs, one pair per line.
# FIX: use 'with' so the list-file handle is closed deterministically.
with open(args['<file>']) as pair_list:
    for l in pair_list:
        i, o = l.rstrip().split()
        do_forward(i, o)
tarf = args["<tar>"]
trainf = args["<file>"]

# Load the network and its per-layer activation functions.
model, actfs = dataio.loadnn(nn)
if gpu:
    cuda.get_device(0).use()
    model.to_gpu()

optimizer = optimizers.MomentumSGD(lr=lr, momentum=mm)
optimizer.setup(model.collect_parameters())

nlayer = len(actfs)
# Input/output dims come from the first/last layers' weight shapes.
idim = model.l_0.W.data.shape[1]
odim = getattr(model, "l_" + str(nlayer - 1)).W.data.shape[0]

data = dataio.dataio(trainf, dataform, idim).astype(np.float32)
if ttype == "c":
    # Classification: cross-entropy with integer class targets.
    # NOTE(review): 'forward_cross_entoropy' is misspelled; keep in sync
    # with its definition site if ever renamed.
    forward = forward_cross_entoropy
    tar = dataio.dataio(tarf, tarform).astype(np.int32)
elif ttype == "f":
    # Regression / feature mapping: MSE with odim-wide float targets.
    forward = forward_mse
    tar = dataio.dataio(tarf, tarform, odim).astype(np.float32)

ndata = data.shape[0]
# FIX: floor division — '/' is true division on Python 3 and would make
# the mini-batch count a float, breaking range()/indexing downstream.
nmb = ndata // mbsize

np.random.seed(seed)
for ep in range(epoch):
    mse = 0.0
    mean_acc = 0.0
    # Fresh random visiting order of the training frames for this epoch.
    mb = np.random.permutation(ndata)
# NOTE(review): 're' shadows the stdlib regex module name in this script.
re = float(args['--re'])
actf = args['--af']
seed = int(args['--seed'])
visnum = int(args['<visnum>'])
hidnum = int(args['<hidnum>'])

if rbmtype != "gb" and rbmtype != "bb":
    # Presumably util.stderr reports and exits — TODO confirm, otherwise
    # 'rbm' is undefined below for an unknown type.
    util.stderr("Unknown RBM type: %s" % rbmtype)
elif rbmtype == "gb":
    rbm = gbRBM(visnum, hidnum, seed=seed)
else:
    # NOTE(review): seed is not forwarded here, unlike gbRBM — confirm
    # this asymmetry is intentional.
    rbm = bbRBM(visnum, hidnum)

af = dataio.str2actf(actf)
data = dataio.dataio(args['<file>'], args['--df'], visnum).astype(np.float32)
ndata = data.shape[0]

if not gpu:
    trainer = rbm.train_cpu
    xp = np
else:
    trainer = rbm.train_gpu
    xp = cupy
    cuda.check_cuda_available()
    cuda.get_device(0).use()
    rbm.to_gpu()
    rbm.init_grads()

np.random.seed(seed)
# FIX: floor division keeps the mini-batch count an int on Python 3
# ('/' would yield a float); identical result for ints on Python 2.
mbnum = ndata // mbsize
# Tail of a binary-save helper whose 'def' line lies above this chunk:
# dumps the raw array bytes to the open file object 'of'.
array.tofile(of)


def savetxt(array, of):
    """Write *array* to *of* as text, one '%+.9e'-formatted value each."""
    np.savetxt(of, array, fmt="%+.9e")


if __name__=='__main__':
    args = docopt(__doc__, argv=sys.argv[1:])
    gpu = not bool(args['--cpu'])
    nnf = args['--nn']
    otype = args['--ot']  # output type selector: f4be / f4ne / f4le / text
    of = args['--of']
    df = args['--df']
    mbsize = int(args['--mbsize'])
    # Load network; input dim comes from the first layer's weight matrix.
    model, actfs = dataio.loadnn(nnf)
    nlayer = len(actfs)
    data = dataio.dataio(args['<file>'], df,
                         model.l_0.W.data.shape[1]).astype(np.float32)
    if gpu:
        cuda.init()
        model.to_gpu()
    # Select endianness flag and save function from the output type.
    if otype == 'f4be':
        be = True
        save = savebin
    elif otype == 'f4ne' or otype == 'f4le':
        be = False
        save = savebin
    elif otype == 'text':
        be = False
        save = savetxt
    # Fallback branch — its body continues beyond this chunk.
    else: