def test_indexing_error(test_varnode):
    """Invalid indexing patterns must raise IndexError/ValueError."""
    network = Network() if test_varnode else None
    base = np.arange(9).reshape(3, 3).astype(np.float32)
    idx = np.array([1, 2])
    aa = make_tensor(base, network)
    bb = make_tensor(idx, network)

    # only one ellipsis is allowed in an index expression
    with pytest.raises(IndexError):
        aa[..., ...]
    # more index tensors than the tensor has dimensions
    with pytest.raises(IndexError):
        aa[bb, bb, bb]
    # assigned value's shape does not match the sliced region
    with pytest.raises(ValueError):
        aa[:] = bb

    if test_varnode:
        # boolean masking produces a tensor whose ndim is unknown at trace time
        cc = aa[aa > 4]
        # ellipsis is unsupported when the tensor's ndim is unknown
        with pytest.raises(IndexError):
            cc[...]
        dd = aa > 4
        # bool index with unknown shape is unsupported together with ellipsis
        with pytest.raises(IndexError):
            cc[..., dd[dd]]
def test_utils_astensor1d(is_varnode):
    """astensor1d accepts literals, ndarrays, tensors and mixed lists."""
    network = Network() if is_varnode else None
    ref = make_tensor(0, network)

    # python literal list
    literal = [1, 2, 3]
    for dtype in (None, "float32"):
        out = astensor1d(literal, ref, dtype=dtype)
        assert isinstance(out, type(ref))
        np.testing.assert_equal(out.numpy(), literal)

    # numpy array — dtype conversion is applied when requested
    arr = np.asarray([1, 2, 3], dtype="int32")
    for dtype in (None, "float32"):
        out = astensor1d(arr, ref, dtype=dtype)
        assert isinstance(out, type(ref))
        np.testing.assert_equal(out.numpy(), arr.astype(dtype) if dtype else arr)

    # an existing tensor passes through unchanged
    tensor_in = make_tensor([1, 2, 3], network)
    for dtype in (None, "float32"):
        out = astensor1d(tensor_in, ref, dtype=dtype)
        assert isinstance(out, type(ref))
        np.testing.assert_equal(out.numpy(), tensor_in.numpy())

    # mixed list of python scalars and tensors
    mixed = [1, make_tensor(2, network), 3]
    for dtype in (None, "float32"):
        out = astensor1d(mixed, ref, dtype=dtype)
        assert isinstance(out, type(ref))
        np.testing.assert_equal(out.numpy(), [1, 2, 3])
def run(self, data, y_all, unid, expdir, args):
    """Train a BaseMLP classifier with L2-regularized BCE loss and persist results.

    data:   feature DataFrame (``.values`` is fed to the model each step)
    y_all:  label DataFrame/Series aligned with ``data``
    unid:   unique run identifier used in the output filenames
    expdir: directory receiving ``wgts_<unid>.pt`` and ``losses_<unid>.npy``
    args:   dict with 'hid_layers', 'lr', 'n_iterations', 'l2_reg'
    """
    wgt_fname = os.path.join(expdir, 'wgts_{}.pt'.format(unid))
    losses_fname = os.path.join(expdir, 'losses_{}.npy'.format(unid))
    losses = []
    dim_x = data.shape[1]
    model = BaseMLP(dim_x, args['hid_layers'])
    optimizer = torch.optim.Adam(model.parameters(), lr=args['lr'])
    for step in tqdm(range(args['n_iterations'])):
        # full-batch training: the entire dataset is re-tensorized every step
        logits = model(make_tensor(data.values))
        labels = make_tensor(y_all.values)
        loss = nn.functional.binary_cross_entropy_with_logits(logits,
                                                              labels)
        # L2 penalty on model weights (model.weight_norm() — see BaseMLP)
        weight_norm = model.weight_norm()
        loss += args['l2_reg'] * weight_norm

        # Do the backprop; zero_grad after step keeps grads clean for next iter
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        # Printing and logging every 1000 steps
        if step % 1000 == 0:
            logging.info([np.int32(step), loss.detach().cpu().numpy()])

        # Store per-step loss history
        losses.append(loss.detach().numpy())
    # Persist loss curve and final weights
    # NOTE(review): source formatting was collapsed — saves are placed after
    # the loop (final snapshot); confirm against the original layout.
    np.save(losses_fname, losses)
    torch.save(model.state_dict(), wgt_fname)
def test_broadcast_auto_infer(is_varnode):
    """broadcast_to infers None axes; invalid target shapes raise ValueError."""
    network = Network() if is_varnode else None
    src = np.random.random((1, 2, 3)).astype(np.float32)
    inp = make_tensor(src, network)

    # None lets the axis be inferred from the input shape
    for target in ((1, 2, 3), (1, None, 3)):
        out = F.broadcast_to(inp, target)
        np.testing.assert_equal(out.numpy(), src)

    # -1 is not a valid placeholder for broadcast targets
    with pytest.raises(ValueError):
        F.broadcast_to(inp, (1, -1, 3))
    # a newly prepended axis cannot be None
    with pytest.raises(ValueError):
        F.broadcast_to(inp, (None, 1, 2, 3))

    # None on a non-leading new-rank axis is allowed
    F.broadcast_to(inp, (1, None, 2, 3))
    # a tensor may supply the leading extent
    lead = make_tensor(2, network)
    F.broadcast_to(inp, (lead, None, 2, 3))
def __init__(self):
    """Interactively collect base options and derive run identifiers, paths,
    and data-pipeline helpers for HMI→AIA image translation training."""
    print('\n------------ Options for base ------------\n')

    # --- generator architecture selection ---
    name_models = ['UNET', 'SUNET', 'SSUNET', 'CSSUNET']
    print('\nAvailable Model: UNET, SUNET, SSUNET, CSSUNET\n')
    answer = input('Select Model? ')
    if answer.upper() not in name_models:
        raise NameError('%s: Invalid model name' % (answer))
    self.name_model = answer.upper()
    del answer

    # --- PatchGAN discriminator depth (sets its receptive field) ---
    print(
        '\n# of layer in PatchGAN Discriminator (Receptive Field Size): 0(1), 1(16), 2(34), 3(70), 4(142), 5(286)\n'
    )
    answer = input('# of layers? ')
    if int(answer) not in range(6):
        raise ValueError('%s: Invalid # of layers in Discriminator' % (answer))
    self.layer_max_d = int(answer)
    del answer

    # --- target AIA wavelength (output channel) ---
    wavelnths = [
        '94', '131', '171', '193', '211', '304', '335', '1600', '1700'
    ]
    print(
        '\nPossible AIA wavelengths: 94, 131, 171, 193, 211, 304, 335, 1600, 1700\n'
    )
    # int() round-trip normalizes input like '094' before membership check
    answer = str(int(input('AIA wavelength? ')))
    if answer not in wavelnths:
        raise ValueError('%s: Invalid AIA wavelength' % (answer))
    self.wavelnth = answer
    self.name_input, self.name_output = 'M_720s', '%s' % (self.wavelnth)
    self.ch_input, self.ch_output = 1, 1
    self.instr_input, self.instr_output = 'hmi', 'aia'
    del answer

    # --- derived identifiers and directory layout ---
    self.isize = 1024          # square image size in pixels
    self.ch_axis = -1          # channels-last layout
    self.mode = '%s_%s.%s_%s' % (self.instr_input, self.name_input,
                                 self.instr_output, self.name_output)
    self.version = 'CGAN_%s_%d_%dD' % (self.name_model, self.isize,
                                       self.layer_max_d)
    self.root_data = '/userhome/park_e/datasets'
    self.root_save = '/userhome/park_e/solar_euv_generation'
    self.root_ckpt = '%s/%s/%s/ckpt' % (self.root_save, self.version, self.mode)
    self.root_snap = '%s/%s/%s/snap' % (self.root_save, self.version, self.mode)
    self.root_test = '%s/%s/%s/test' % (self.root_save, self.version, self.mode)

    # --- data-pipeline helper callables (factories from the project) ---
    self.make_tensor_input = make_tensor(self.isize, is_aia=False)
    self.make_tensor_output = make_tensor(self.isize, is_aia=True)
    self.shake_tensor = shake_tensor(self.isize)
    self.make_output = make_output(self.isize, self.wavelnth)
def test_matmul(is_varnode):
    """The @ operator matches numpy matrix multiplication."""
    # NOTE(review): a second test_matmul definition exists later in this file;
    # if both live in one module, this one is shadowed.
    network = Network() if is_varnode else None
    lhs = make_tensor(np.random.rand(5, 7).astype("float32"), network)
    rhs = make_tensor(np.random.rand(7, 10).astype("float32"), network)
    result = lhs @ rhs
    np.testing.assert_almost_equal(
        result.numpy(), lhs.numpy() @ rhs.numpy(), decimal=6
    )
def test_linspace(is_varnode):
    """F.linspace with ascending, descending, and tensor-valued endpoints."""
    network = Network() if is_varnode else None

    ascending = [{"input": [1, 9, 9]}, {"input": [3, 10, 8]}]
    opr_test(
        ascending,
        F.linspace,
        ref_fn=lambda start, end, step: np.linspace(
            start, end, step, dtype=np.float32),
        network=network,
    )

    descending = [{"input": [9, 1, 9]}, {"input": [10, 3, 8]}]
    opr_test(
        descending,
        F.linspace,
        ref_fn=lambda start, end, step: np.linspace(
            start, end, step, dtype=np.float32),
        network=network,
    )

    # endpoints supplied as 0-d tensors; both cases evaluate to (1, 9, 9)
    tensor_endpoints = [
        {"input": [1, make_tensor(9, network), 9]},
        {"input": [make_tensor(1, network), 9, make_tensor(9, network)]},
    ]
    opr_test(
        tensor_endpoints,
        F.linspace,
        ref_fn=lambda start, end, step: np.linspace(1, 9, 9, dtype=np.float32),
        network=network,
    )
def test_inplace_add(is_varnode):
    """In-place += on a tensor matches numpy elementwise addition."""
    network = Network() if is_varnode else None
    lhs_np = np.random.rand(10).astype("float32")
    rhs_np = np.random.rand(10).astype("float32")
    lhs = make_tensor(lhs_np, network)
    rhs = make_tensor(rhs_np, network)
    rhs += lhs
    np.testing.assert_almost_equal(rhs.numpy(), lhs_np + rhs_np)
def test_condtake(is_varnode):
    """cond_take returns masked values and their flattened indices."""
    network = Network() if is_varnode else None
    values = np.array([[1, 2, 3], [4, 5, 6]]).astype("float32")
    mask = np.array([[True, False, True], [False, True, True]])
    values_t = make_tensor(values, network)
    mask_t = make_tensor(mask, network)
    # note argument order: mask first, then data
    taken, indices = F.cond_take(mask_t, values_t)
    np.testing.assert_equal(taken.numpy(), values[mask])
    np.testing.assert_equal(indices.numpy(), np.where(mask.reshape(-1))[0])
def test_concat_device(is_varnode):
    """concat with an explicit device places the result on that device."""
    network = Network() if is_varnode else None
    part_a = make_tensor(
        np.random.random((3, 2, 2)).astype("float32"), network, "cpu0")
    part_b = make_tensor(
        np.random.random((2, 2, 2)).astype("float32"), network, "cpu1")
    joined = F.concat([part_a, part_b], device="cpu0")
    # strip any ":stream" suffix before comparing
    assert str(joined.device).split(":")[0] == "cpu0"
def test_matmul(is_varnode, shape_a, shape_b):
    """Parameterized @ operator check against numpy, via raw var values for
    the varnode path."""
    network = Network() if is_varnode else None
    lhs = make_tensor(np.random.rand(*shape_a).astype("float32"), network)
    rhs = make_tensor(np.random.rand(*shape_b).astype("float32"), network)
    result = lhs @ rhs
    if is_varnode:
        np.testing.assert_almost_equal(
            get_var_value(result),
            get_var_value(lhs) @ get_var_value(rhs),
            decimal=6,
        )
    else:
        np.testing.assert_almost_equal(
            result.numpy(), lhs.numpy() @ rhs.numpy(), decimal=6
        )
def retrieve_images(sketch_query, query_label, sketch_z_encoder,
                    image_s_enocoder, fusion_network, z_output_image,
                    s_output_image, image_feature_dataset,
                    image_label_dataset):
    """Retrieve the top-k gallery images for one sketch query and score them.

    Encodes the sketch into z-space, takes the params.num_query nearest gallery
    z-vectors, fuses the query with each candidate's s-features to synthesize
    fake image features, and computes a rank-weighted score of label matches.
    Returns (predicted_label, score).
    """
    # 1 x D row vector for the encoder (assumes sketch_query is 1-D — confirm)
    sketch_query = sketch_query.reshape(-1, sketch_query.shape[0])
    with torch.no_grad():
        query_z_vector = sketch_z_encoder(sketch_query)
        # squared euclidean distance to every gallery z-vector
        distance = torch.sum((z_output_image - query_z_vector)**2, dim=1)
        sorted_distance, sorted_arg = torch.sort(distance)
        k_closest_arg = sorted_arg[0:params.num_query]
        K_closest_features = s_output_image[k_closest_arg]
        # replicate the query so it pairs with each of the k candidates
        query_z_vector = torch.cat([query_z_vector] * params.num_query)
        fake_features = fusion_network(query_z_vector, K_closest_features)
        # re-rank candidates by distance between real and synthesized features
        normalized_distance = torch.sum(
            (image_feature_dataset[k_closest_arg] - fake_features)**2, dim=1)
        _, normalized_sorted_arg = torch.sort(normalized_distance)
        retrived_arg = k_closest_arg[normalized_sorted_arg]
        # labels are taken in first-stage order, not the re-ranked order
        # (the re-ranked alternative is kept below for reference)
        # predicted_label = image_label_dataset[retrived_arg]
        predicted_label = image_label_dataset[k_closest_arg]
        # print( ' predicated label ',image_label_dataset[sorted_arg])
        # print('query label', query_label)
        # harmonic rank weights: 1, 1/2, 1/3, ... favor top-ranked matches
        weight = 1 / make_tensor(
            np.arange(1, params.num_query + 1, dtype=np.float32))
        score = torch.sum(weight *
                          (predicted_label == query_label)) / torch.sum(weight)
        return predicted_label, score
def test_split(is_varnode):
    """F.split by section count and by explicit split points, plus error cases.

    Fix: the error cases used ``try: ...; assert False; except ValueError:
    pass`` — replaced with ``pytest.raises``, the idiom every other test in
    this file already uses (and which cannot silently swallow an
    AssertionError).
    """
    network = Network() if is_varnode else None
    data = np.random.random((2, 3, 4, 5)).astype(np.float32)
    inp = make_tensor(data, network)

    # 2 equal sections vs explicit split point [3] along axis 3
    out_by_count = F.split(inp, 2, axis=3)
    out_by_points = F.split(inp, [3], axis=3)
    ref = np.split(data, [3, 5], axis=3)  # ref[2] is empty and ignored
    assert len(out_by_count) == 2
    assert len(out_by_points) == 2
    for i in range(2):
        np.testing.assert_equal(out_by_count[i].numpy(), ref[i])
        np.testing.assert_equal(out_by_points[i].numpy(), ref[i])

    # 4 sections cannot evenly divide axis 0 (size 2)
    with pytest.raises(ValueError):
        F.split(inp, 4)
    # non-increasing / out-of-range split points are rejected with a
    # specific message
    with pytest.raises(ValueError) as exc_info:
        F.split(inp, [3, 3, 5], axis=3)
    assert str(exc_info.value) == "Invalid nsplits_or_secions: [3, 3, 5]"
def test_x(x_np):
    """Tensor reductions along the last axis agree with their numpy
    counterparts."""
    # NOTE(review): `network` is not a parameter or local here — presumably a
    # module-level global or fixture defined elsewhere in the file; verify.
    for method in ("sum", "prod", "min", "max", "mean"):
        tensor = make_tensor(x_np, network)
        reduced = getattr(tensor, method)(axis=-1, keepdims=True)
        np.testing.assert_almost_equal(
            reduced.numpy(), getattr(x_np, method)(-1), decimal=6
        )
def get_style_transfer(model: TransferModel,
                       sp: spm.SentencePieceProcessor,
                       preprocessed_batch: tp.List[str],
                       dest_styles: tp.List[int],
                       temperature: float = 0.001,
                       max_steps: int = 30,
                       bos_token: int = 1,
                       eos_token: int = 2) -> tp.List[str]:
    """ Get style transfer of batch of text """
    model.eval()
    device = model.encoder.embedding.weight.device

    # encode text to id sequences, then pad/frame into a batch tensor
    encoded = [sp.encode_as_ids(text) for text in preprocessed_batch]
    batch_ids = make_tensor(encoded, 1, 2, 0).to(device)
    styles = torch.tensor(dest_styles, dtype=int, device=device)

    # greedy-ish decoding at low temperature; 0 is the pad id
    translated_batch, pad_mask = model.temperature_translate_batch(
        batch_ids, batch_ids != 0, styles, temperature, max_steps,
        bos_token, eos_token)
    translated_batch *= pad_mask  # zero out positions past each sequence end

    # sequences are time-major; transpose back before decoding to text
    return [sp.decode(ids) for ids in translated_batch.T.tolist()]
def copy_test(dst, src, network):
    """Helper: copy a tensor from device `src` to `dst` and verify contents."""
    payload = np.random.random((2, 3)).astype(np.float32)
    source = make_tensor(payload, device=src, network=network)
    copied = F.copy(source, dst)
    assert np.allclose(payload, copied.numpy())
    # .to() is only available on eager tensors, not graph var nodes
    if network is None:
        moved = source.to(dst)
        assert np.allclose(payload, moved.numpy())
def test_identity(is_varnode):
    """F.copy with no target device returns an identical tensor.

    Fix: the original compared ``y.numpy()`` against the tensor object ``x``
    itself, relying on implicit ndarray coercion; compare against
    ``x.numpy()`` explicitly, matching every other test in this file and
    staying safe on the varnode path.
    """
    network = Network() if is_varnode else None
    x = make_tensor(np.random.random((5, 10)).astype(np.float32), network)
    y = F.copy(x)
    np.testing.assert_equal(y.numpy(), x.numpy())
def test_elemwise_dtype_promotion(is_varnode):
    """Mixed float32/float16 elementwise ops promote like numpy, including
    tensor-with-ndarray operands on either side."""
    network = Network() if is_varnode else None
    lhs = np.random.rand(2, 3).astype("float32")
    rhs = np.random.rand(1, 3).astype("float16")
    lhs_t = make_tensor(lhs, network)
    rhs_t = make_tensor(rhs, network)

    # tensor * tensor
    np.testing.assert_equal((lhs_t * rhs_t).numpy(), lhs * rhs)
    # tensor + ndarray
    np.testing.assert_equal((lhs_t + rhs).numpy(), lhs + rhs)
    # ndarray - tensor
    np.testing.assert_equal((lhs - rhs_t).numpy(), lhs - rhs)
def test_transpose(is_varnode):
    """The .T property matches numpy transpose."""
    network = Network() if is_varnode else None
    data = np.random.rand(2, 5).astype("float32")
    tensor = make_tensor(data, network)
    np.testing.assert_almost_equal(tensor.T.numpy(), data.T)
def test_reshape(is_varnode):
    """F.reshape accepts tuples, -1 wildcards, tensor elements, ndarrays and
    tensors as the target shape."""
    network = Network() if is_varnode else None
    data = np.arange(6, dtype="float32")
    tensor = make_tensor(data, network)
    expected = data.reshape(1, 2, 3)
    shape_specs = [
        (1, 2, 3),
        (1, -1, 3),
        (1, make_tensor(-1, network), 3),
        np.array([1, -1, 3], dtype="int32"),
        make_tensor([1, -1, 3], network),
    ]
    for spec in shape_specs:
        out = F.reshape(tensor, spec)
        np.testing.assert_equal(out.numpy(), expected)
def generate(model, char2id, id2char, args):
    """Generate text from a character RNN, primed with ``args.starts_with``.

    Samples ``args.predict_len`` characters with temperature-scaled
    multinomial sampling, then strips '0' characters (presumably the
    padding symbol — confirm against make_tensor) from the result.
    """
    prime_str = args.starts_with
    # encode the prime (minus its final char, which seeds the sampling loop)
    inputs_ = [make_tensor(seq[: -1], char2id) for seq in [prime_str]]
    prime_input = Variable(torch.stack(inputs_)).transpose(0, 1)
    predicted = prime_str
    hidden = model.init_hidden(1)  # batch size 1
    # feed the prime through the model to warm up the hidden state
    for p in range(len(prime_str) - 1):
        _, hidden = model(prime_input[p], hidden)
    inp = prime_input[-1]
    for p in range(args.predict_len):
        output, _ = model(inp, hidden)
        # temperature-scaled sampling over the output distribution
        output_dist = output.data.view(-1).div(args.temperature).exp()
        top_id = torch.multinomial(output_dist, 1)[0]
        predicted_char = id2char[top_id.item()]
        predicted += predicted_char
        inp = make_tensor(predicted_char, char2id)
    return predicted.replace('0', '')
def predict(self, data, model_params, hid_layers=100):
    '''Run a BaseMLP over `data` using the given weights.

    :param data: the dataset (nparray)
    :param model_params: the state dict of the MLP
    :param hid_layers: hidden layer width used when the weights were trained
    :return: DataFrame of raw model outputs (empty DataFrame for empty input)
    '''
    # empty input: nothing to score
    if data.shape[0] == 0:
        return pd.DataFrame()
    net = BaseMLP(data.shape[1], hid_layers)
    net.load_state_dict(model_params)
    outputs = net(make_tensor(data)).detach().numpy()
    return pd.DataFrame(outputs)
def test_expand_dims_for_scalar():
    """expand_dims on a 0-d tensor: valid axes match numpy; invalid axes
    raise (AxisError in numpy, RuntimeError in megengine)."""
    scalar = np.array(1, dtype="float32")
    tensor = make_tensor(scalar, None)

    for axis in (0, -1, (0, 1), (-1, -2), (0, -1)):
        expected = np.expand_dims(scalar, axis)
        actual = F.expand_dims(tensor, axis)
        np.testing.assert_equal(expected, actual.numpy())

    for axis in (1, -2, (1, 2), (-2, -3)):
        np.testing.assert_raises(np.AxisError, np.expand_dims, scalar, axis)
        np.testing.assert_raises(RuntimeError, F.expand_dims, tensor, axis)
def test_set_value(is_varnode):
    """Assigning through param[...] replaces the tensor's contents."""
    network = Network() if is_varnode else None
    initial = np.random.random((2, 3)).astype(np.float32)
    param = make_tensor(initial, network)
    replacement = np.random.random((2, 3)).astype(np.float32)
    param[...] = replacement
    np.testing.assert_allclose(param.numpy(), replacement, atol=5e-6)
def test_literal_arith(is_varnode):
    """Arithmetic between a tensor and a python literal matches numpy."""
    network = Network() if is_varnode else None
    data = np.random.rand(10).astype("float32")
    tensor = make_tensor(data, network)
    doubled = tensor * 2
    np.testing.assert_almost_equal(doubled.numpy(), data * 2)
def test_squeeze(is_varnode):
    """F.squeeze matches numpy for None, positive, negative and tuple axes."""
    network = Network() if is_varnode else None
    data = np.arange(6, dtype="float32").reshape(1, 2, 3, 1)
    tensor = make_tensor(data, network)
    for axis in (None, 3, -4, (3, -4)):
        expected = np.squeeze(data, axis)
        actual = F.squeeze(tensor, axis)
        np.testing.assert_equal(expected, actual.numpy())
def test_expand_dims(is_varnode):
    """F.expand_dims matches numpy for single and tuple axis arguments."""
    network = Network() if is_varnode else None
    data = np.arange(6, dtype="float32").reshape(2, 3)
    tensor = make_tensor(data, network)
    for axis in (2, -3, (3, -4), (1, -4)):
        expected = np.expand_dims(data, axis)
        actual = F.expand_dims(tensor, axis)
        np.testing.assert_equal(expected, actual.numpy())
def test_set_subtensor(is_varnode):
    """Slice, fancy-index, and range assignment into a tensor."""
    network = Network() if is_varnode else None
    t = make_tensor([1, 2, 3], network)

    # full-slice assignment
    t[:] = [1, 1, 1]
    np.testing.assert_almost_equal(t.numpy(), [1, 1, 1], decimal=6)
    # fancy-index assignment: positions 0 and 2
    t[[0, 2]] = [3, 2]
    np.testing.assert_almost_equal(t.numpy(), [3, 1, 2], decimal=6)
    # contiguous-range assignment
    t[1:3] = [4, 5]
    np.testing.assert_almost_equal(t.numpy(), [3, 4, 5], decimal=6)
def fusion_validation(query_feature_arr, query_label_arr, sketch_z_encoder,
                      image_z_encoder, image_s_encoder, fusion_network,
                      image_feature_dataset, image_label_dataset):
    """Evaluate retrieval over a validation set and print the mean score.

    Puts the encoders in eval mode, pre-encodes the whole image gallery once,
    then scores each sketch query via retrieve_images.
    """
    sketch_z_encoder.eval()
    image_z_encoder.eval()
    image_s_encoder.eval()
    total_score = 0
    # convert numpy inputs to tensors once, up front
    query_feature_arr = make_tensor(query_feature_arr)
    query_label_arr = make_tensor(query_label_arr)
    image_feature_dataset = make_tensor(image_feature_dataset)
    image_label_dataset = make_tensor(image_label_dataset)
    with torch.no_grad():
        # encode the full gallery once; reused for every query
        z_output_image = image_z_encoder(image_feature_dataset)
        s_output_image = image_s_encoder(image_feature_dataset)
        for query, label in zip(query_feature_arr, query_label_arr):
            _, scores = retrieve_images(query, label, sketch_z_encoder,
                                        image_s_encoder, fusion_network,
                                        z_output_image, s_output_image,
                                        image_feature_dataset,
                                        image_label_dataset)
            total_score += scores
    # mean retrieval score across all validation queries (printed, not returned)
    print("validation scores :", total_score / len(query_feature_arr))
def test_as_type(is_varnode):
    """astype to quantized dtypes carries the requested scale and zero point."""
    network = Network() if is_varnode else None
    source = make_tensor(np.array([1, 2, 3], dtype=np.float32), network)

    q1 = source.astype(qint8(0.1))
    np.testing.assert_almost_equal(get_scale(q1.dtype), 0.1)

    # re-quantizing updates the scale
    q2 = q1.astype(qint8(0.2))
    np.testing.assert_almost_equal(get_scale(q2.dtype), 0.2)

    # unsigned quantization tracks both scale and zero point
    u1 = q2.astype(quint8(0.3, 127))
    np.testing.assert_almost_equal(get_scale(u1.dtype), 0.3)
    np.testing.assert_equal(get_zero_point(u1.dtype), 127)

    # same scale, different zero point
    u2 = u1.astype(quint8(0.3, 128))
    np.testing.assert_almost_equal(get_scale(u2.dtype), 0.3)
    np.testing.assert_equal(get_zero_point(u2.dtype), 128)