def test_speed_gpu(self):
    """Smoke-test iouutils.ious on small GPU arrays and print the result."""
    boxes_a = cp.array([[1, 2, 1, 2], [3, 4, 3, 4], [5, 5, 5, 5]],
                       dtype=cp.float32)
    boxes_b = cp.array([[10, 2, 10, 2], [1, 1, 11, 1]], dtype=cp.float32)
    result = iouutils.ious(boxes_a, boxes_b)
    print(result)
    print(result.shape)
def test_acc_gpu(self):
    """Run iouutils.ious on hand-picked overlap cases and print the output."""
    reference = cp.array([[0, 0, 10, 10], [2, 3, 4, 5]], dtype=cp.float32)
    candidates = cp.array(
        [[0, 0, 10, 10],
         [2, 2, 15, 15],
         [5, 5, 15, 15],
         [20, 20, 22, 30]],
        dtype=cp.float32)
    ious = iouutils.ious(reference, candidates)
    print(ious)
    print(ious.shape)
def get_class_weights(self, gpu_id=-1):
    """Return per-class weights inversely related to class frequency.

    Each class label is taken from the second element of the pairs in
    ``self._pairs``; a class occurring with relative frequency f gets
    weight 1 - f, so rarer classes get larger weights.

    :param gpu_id: device id; a negative value (default) returns a
        ``numpy`` array, otherwise a ``cupy`` array is returned.
    :return: float32 array of weights, one per distinct class, in the
        order classes are first encountered.
    """
    classes = [pair[1] for pair in self._pairs]
    counts = collections.Counter(classes)
    # BUG FIX: Counter.iteritems() exists only in Python 2 and raises
    # AttributeError on Python 3 (this file uses f-strings elsewhere,
    # so it runs on Python 3).  values() yields the same counts.
    weights = [float(count) for count in counts.values()]
    total = sum(weights)
    weights[:] = [1.0 - (w / total) for w in weights]
    if gpu_id >= 0:
        return cupy.array(weights, "float32")
    else:
        return numpy.array(weights, "float32")
def generate_anchors(base_size=16, ratios=[0.5, 1, 2],
                     scales=2**xp.arange(3, 6)):
    """Generate anchor (reference) windows by enumerating aspect ratios
    X scales wrt a reference (0, 0, 15, 15) window.
    """
    # 0-indexed reference box: (0, 0, base_size-1, base_size-1).
    base_anchor = xp.array([1, 1, base_size, base_size]) - 1
    ratio_anchors = _ratio_enum(base_anchor, xp.asarray(ratios))
    # Expand every ratio anchor across all scales, then stack the rows.
    expanded = []
    for row in range(ratio_anchors.shape[0]):
        expanded.append(_scale_enum(ratio_anchors[row, :], scales))
    return xp.vstack(expanded)
def run_model():
    """Train MLPXOR on the XOR truth table with plain SGD.

    Returns the list of per-iteration sigmoid-cross-entropy losses.
    """
    model = MLPXOR()
    if CUDA:
        chainer.cuda.get_device_from_id(0).use()
        model.to_gpu()

    # The four XOR input rows and their targets, as Chainer Variables.
    inputs = Variable(xp.array([[0, 0], [0, 1], [1, 0], [1, 1]]).astype('f'))
    targets = Variable(xp.array([0, 1, 1, 0]).astype('i').reshape((4, 1)))

    opt = optimizers.SGD(1.0)
    opt.setup(model)

    history = []
    for _ in range(1, ITERATIONS):
        model.cleargrads()
        loss = F.sigmoid_cross_entropy(model(inputs), targets)
        history.append(loss.data)
        loss.backward()
        opt.update()
    return history
def makevar(x):
    """Wrap a single scalar in a length-1 int32 Variable."""
    wrapped = xp.array([x], dtype=xp.int32)
    return Variable(wrapped)
def makevar(arr):
    """Convert an array-like of ints into an int32 Variable."""
    as_int32 = xp.array(arr, dtype=xp.int32)
    return Variable(as_int32)
def main():
    """Entry point: train a NeuralNet on CIFAR-10 and plot accuracy/loss
    curves for the experiment selected by the command-line flags
    (--basic, --pruning, --iteration, --minimal, or a plain run).
    """
    init_random()
    model = NeuralNet()
    if args.gpu:
        cuda.get_device(0).use()
        model.to_gpu(0)

    # Load CIFAR-10 and move it to the device as NCHW batches.
    train, test = chainer.datasets.get_cifar10()
    xs, ts = train._datasets
    txs, tts = test._datasets
    xs = cp.array(xs.reshape((len(xs), 3, 32, 32)))
    ts = cp.array(ts)
    txs = cp.array(txs.reshape((len(txs), 3, 32, 32)))
    tts = cp.array(tts)

    def train_model(num_epochs, model, unchain=False):
        # Train `model` for `num_epochs` epochs on the closed-over data;
        # returns per-epoch train/test accuracy and mean train loss.
        init_random()
        xp = model.xp
        optimizer = chainer.optimizers.Adam()
        optimizer.setup(model)
        acc_train = []
        loss_train = []
        acc_test = []
        for i in range(num_epochs):
            loss_train_log = []
            n_loop = int(len(xs) // batch_size)
            optimizer.new_epoch()
            for j in range(n_loop):
                # cleargrads() replaces the deprecated zerograds(); this
                # also matches the style used elsewhere in this file.
                model.cleargrads()
                x = xs[j * batch_size:(j + 1) * batch_size]
                t = ts[j * batch_size:(j + 1) * batch_size]
                t = Variable(t)
                y = model(x, unchain=unchain)
                loss = F.softmax_cross_entropy(y, t)
                loss.backward()
                optimizer.update()
                loss_train_log.append(xp.mean(loss.data))
            acc_train.append(get_acc(model, xs, ts))
            loss_train.append(float(np.sum(loss_train_log) / n_loop))
            acc_test.append(get_acc(model, txs, tts))
        return {
            'acc(train)': acc_train,
            'acc(test)': acc_test,
            'loss(train)': loss_train
        }

    if not (args.basic or args.pruning or args.iteration or args.minimal):
        # No experiment flag: plain full-length training run.
        data = train_model(n_maxtrain, model)
        stack_result('test.png', data)
    elif args.basic or args.pruning or args.iteration:
        # Warm-up phase shared by the basic/pruning/iteration experiments.
        pretrain_data = train_model(n_pretrain, model)
        if args.basic:
            # basic score
            model0 = model.copy('copy')
            base_data = train_model(n_maxtrain - n_pretrain, model0,
                                    args.unchain)
            base_train = pretrain_data['acc(train)'] + base_data['acc(train)']
            base_test = pretrain_data['acc(test)'] + base_data['acc(test)']
            base_loss = pretrain_data['loss(train)'] + base_data['loss(train)']
            stack_result(
                'basic_unchain.png' if args.unchain else 'basic.png', {
                    'acc(train)': base_train,
                    'acc(test)': base_test,
                    'loss(train)': base_loss
                })
            del model0
        if args.pruning:
            # train pruned net
            model1, nodes1 = model.prun(0.65)
            model2, nodes2 = model.prun(0.75)
            prun_data1 = train_model(n_maxtrain - n_pretrain, model1,
                                     args.unchain)
            prun_data2 = train_model(n_maxtrain - n_pretrain, model2,
                                     args.unchain)
            prun_train1 = pretrain_data['acc(train)'] + prun_data1['acc(train)']
            prun_test1 = pretrain_data['acc(test)'] + prun_data1['acc(test)']
            prun_loss1 = pretrain_data['loss(train)'] + prun_data1['loss(train)']
            prun_train2 = pretrain_data['acc(train)'] + prun_data2['acc(train)']
            prun_test2 = pretrain_data['acc(test)'] + prun_data2['acc(test)']
            prun_loss2 = pretrain_data['loss(train)'] + prun_data2['loss(train)']
            # BUG FIX: the test/loss keys were missing their closing ')',
            # producing plot labels inconsistent with the train keys.
            stack_result(
                'pruning_unchain.png' if args.unchain else 'pruning.png', {
                    f'acc(train1_{nodes1})': prun_train1,
                    f'acc(test1_{nodes1})': prun_test1,
                    f'acc(train2_{nodes2})': prun_train2,
                    f'acc(test2_{nodes2})': prun_test2,
                    f'loss(train1_{nodes1})': prun_loss1,
                    f'loss(train2_{nodes2})': prun_loss2
                })
            del model1, model2
        if args.iteration:
            # train pruned net, pruning a little further every epoch
            prun_train1 = pretrain_data['acc(train)'][:]
            prun_test1 = pretrain_data['acc(test)'][:]
            prun_loss1 = pretrain_data['loss(train)'][:]
            prun_train2 = pretrain_data['acc(train)'][:]
            prun_test2 = pretrain_data['acc(test)'][:]
            prun_loss2 = pretrain_data['loss(train)'][:]
            model1 = model.copy('copy')
            model2 = model.copy('copy')
            for i in range(n_maxtrain - n_pretrain):
                model1, nodes1 = model1.prun(0.067595123)  # (0.65^15)^f-1
                model2, nodes2 = model2.prun(0.142304101)  # (0.9^15)^f-1
                prun_data1 = train_model(1, model1, args.unchain)
                prun_data2 = train_model(1, model2, args.unchain)
                prun_train1 = prun_train1 + prun_data1['acc(train)']
                prun_test1 = prun_test1 + prun_data1['acc(test)']
                prun_loss1 = prun_loss1 + prun_data1['loss(train)']
                prun_train2 = prun_train2 + prun_data2['acc(train)']
                prun_test2 = prun_test2 + prun_data2['acc(test)']
                prun_loss2 = prun_loss2 + prun_data2['loss(train)']
            # BUG FIX: keys normalized to include the closing ')', and the
            # second loss key now uses nodes2 (was a nodes1 copy-paste).
            # NOTE(review): the plot is emitted once, after the loop —
            # per-iteration emission would only overwrite the same file.
            stack_result(
                'iteration_unchain.png' if args.unchain else 'iteration.png', {
                    f'acc(train1_{nodes1})': prun_train1,
                    f'acc(test1_{nodes1})': prun_test1,
                    f'acc(train2_{nodes2})': prun_train2,
                    f'acc(test2_{nodes2})': prun_test2,
                    f'loss(train1_{nodes1})': prun_loss1,
                    f'loss(train2_{nodes2})': prun_loss2
                })
            del model1, model2
    if args.minimal:
        # basic score for a minimal (100-unit) network
        model0 = NeuralNet(100)
        model0.to_gpu(0)
        pretrain_data = train_model(n_pretrain, model0)
        mini_data = train_model(n_maxtrain - n_pretrain, model0)
        mini_train = pretrain_data['acc(train)'] + mini_data['acc(train)']
        mini_test = pretrain_data['acc(test)'] + mini_data['acc(test)']
        mini_loss = pretrain_data['loss(train)'] + mini_data['loss(train)']
        stack_result(
            'minimal.png', {
                'acc(train_min)': mini_train,
                'acc(test_min)': mini_test,
                'loss(train_min)': mini_loss
            })
        del model0
def __init__(self, feat_stride=16, anchor_scales=[4, 8, 16, 32]):
    """Precompute the base anchor set for the given stride and scales.

    The mutable default for ``anchor_scales`` is kept for interface
    compatibility; it is never mutated here.
    """
    self._feat_stride = feat_stride
    scale_array = xp.array(anchor_scales)
    self._anchors = xp.asarray(generate_anchors(scales=scale_array))
    self._num_anchors = self._anchors.shape[0]
def test_speed_gpu(self):
    """Exercise iouutils.ious on small float32 GPU inputs; print output."""
    lhs = cp.array([[1, 2, 1, 2],
                    [3, 4, 3, 4],
                    [5, 5, 5, 5]], dtype=cp.float32)
    rhs = cp.array([[10, 2, 10, 2],
                    [1, 1, 11, 1]], dtype=cp.float32)
    answer = iouutils.ious(lhs, rhs)
    print(answer)
    print(answer.shape)
def test_acc_gpu(self):
    """Check iouutils.ious on known overlap configurations; print output."""
    base_boxes = cp.array([[0, 0, 10, 10], [2, 3, 4, 5]], dtype=cp.float32)
    probes = cp.array(
        [[0, 0, 10, 10], [2, 2, 15, 15], [5, 5, 15, 15], [20, 20, 22, 30]],
        dtype=cp.float32)
    overlap = iouutils.ious(base_boxes, probes)
    print(overlap)
    print(overlap.shape)