Example #1
 def load_state_dict(self):
     print(f"Loading model from {self.args.model_load_dir}")
     if self.is_global:
         state_dict = flow.load(self.args.model_load_dir, global_src_rank=0)
     elif self.rank == 0:
         state_dict = flow.load(self.args.model_load_dir)
     else:
         return
     self.wdl_module.load_state_dict(state_dict)
Example #2
 def restore_model(self, model_save_dir):
     """Restore the tra,zined generator and discriminator."""
     print("Loading the pretrain models...")
     G_path = os.path.join(model_save_dir, "200000-G")
     D_path = os.path.join(model_save_dir, "200000-D")
     C_path = os.path.join(model_save_dir, "200000-C")
     self.G.load_state_dict(flow.load(G_path))
     self.D.load_state_dict(flow.load(D_path))
     self.C.load_state_dict(flow.load(C_path))
Example #3
 def loadModel(self, PATH):
     self.generator_A2B.load_state_dict(
         flow.load(os.path.join(PATH, "generator_A2B")))
     self.generator_B2A.load_state_dict(
         flow.load(os.path.join(PATH, "generator_B2A")))
     self.discriminator_A.load_state_dict(
         flow.load(os.path.join(PATH, "discriminator_A")))
     self.discriminator_B.load_state_dict(
         flow.load(os.path.join(PATH, "discriminator_B")))
Example #4
    def load_state_dict(self):

        if self.is_consistent:
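            # consistent_src_rank=0 makes rank 0 read the files and broadcast the result to all ranks as consistent (global) tensors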
            state_dict = flow.load(self.load_path, consistent_src_rank=0)
        elif self.rank == 0:
            state_dict = flow.load(self.load_path)
        else:
            return
        logging.info("Model resume successfully!")
        self.model.load_state_dict(state_dict)
Example #5
    def test_save_and_load_consistent_from_nested_dict(test_case):
        class CustomModule(flow.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = flow.nn.Parameter(flow.randn(3, 32, 3, 3))

            def forward(self):
                return self.param

        m1 = CustomModule()
        m1 = m1.to_consistent(flow.placement("cuda", {0: range(2)}),
                              flow.sbp.broadcast)
        m2 = CustomModule()
        m2 = m2.to_consistent(flow.placement("cuda", {0: range(2)}),
                              flow.sbp.broadcast)
        res1 = m1() + m2()
        state_dict1 = m1.state_dict()
        state_dict2 = m2.state_dict()
        state_dict = {"m1": state_dict1, "m2": state_dict2}

        with tempfile.TemporaryDirectory() as f:
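            # Saving consistent tensors without consistent_dst_rank is expected to raise; with it, only the destination rank writes files to disk.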
            with test_case.assertRaises(Exception):
                flow.save(state_dict, f)

            consistent_src_dst_rank = 0
            flow.save(state_dict,
                      f,
                      consistent_dst_rank=consistent_src_dst_rank)
            rank = flow.env.get_rank()
            if rank != consistent_src_dst_rank:
                test_case.assertEqual(len(os.listdir(f)), 0)

            m1 = CustomModule()
            m1 = m1.to_consistent(flow.placement("cuda", {0: range(2)}),
                                  flow.sbp.broadcast)
            m2 = CustomModule()
            m2 = m2.to_consistent(flow.placement("cuda", {0: range(2)}),
                                  flow.sbp.broadcast)

            with test_case.assertRaises(Exception):
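                # A plain flow.load without consistent_src_rank is expected to fail for a checkpoint saved with consistent_dst_rank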
                loaded_state_dict = flow.load(f)
                m1.load_state_dict(loaded_state_dict["m1"])

            loaded_state_dict = flow.load(
                f, consistent_src_rank=consistent_src_dst_rank)
            test_case.assertEqual(len(loaded_state_dict), 2)
            m1.load_state_dict(loaded_state_dict["m1"])
            m2.load_state_dict(loaded_state_dict["m2"])
            res2 = m1() + m2()

        test_case.assertTrue(
            np.array_equal(
                res1.to_consistent(sbp=flow.sbp.broadcast).to_local().numpy(),
                res2.to_consistent(sbp=flow.sbp.broadcast).to_local().numpy(),
            ))
Example #6
    def load_state_dict(self):
        self.logger.print(f"Loading model from {self.load_path}",
                          print_ranks=[0])
        if self.is_global:
            state_dict = flow.load(self.load_path, global_src_rank=0)
        elif self.rank == 0:
            state_dict = flow.load(self.load_path)
        else:
            return

        self.model.load_state_dict(state_dict)
Example #7
def main(args):

    device = args.device
    input_lang, output_lang, pairs = prepareData("eng", "fra", True)
    e = flow.load(args.encoder_path)
    d = flow.load(args.decoder_path)
    encoder = EncoderRNN_oneflow(input_lang.n_words, 256).to(device)
    decoder = AttnDecoderRNN_oneflow(256, output_lang.n_words,
                                     dropout_p=0.1).to(device)
    encoder.load_state_dict(e)
    decoder.load_state_dict(d)
    evaluateRandomly(encoder, decoder, pairs, input_lang, output_lang)
Example #8
    def train_by_oneflow():
        x = Parameter(flow.Tensor(init_value, device=flow.device(device)))
        adagrad = flow.optim.Adagrad(
            [
                {
                    "params": [x],
                    "lr": learning_rate,
                    "eps": eps,
                    "weight_decay": weight_decay,
                }
            ],
            lr_decay=lr_decay,
            initial_accumulator_value=initial_accumulator_value,
        )

        def train_one_iter(grad):
            grad_tensor = flow.tensor(
                grad, requires_grad=False, device=flow.device(device)
            )
            loss = flow.sum(x * grad_tensor)
            loss.backward()
            adagrad.step()
            adagrad.zero_grad()

        for i in range(train_iters):
            train_one_iter(random_grad_seq[i])
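            # test state_dict/load_state_dict, optionally round-tripping the optimizer state through flow.save/flow.load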
            if i == reload_state_step:
                state_dict = adagrad.state_dict()
                adagrad = flow.optim.Adagrad([x])
                if save_load_by_pickle:
                    with tempfile.TemporaryDirectory() as save_dir:
                        flow.save(state_dict, save_dir)
                        state_dict = flow.load(save_dir)
                adagrad.load_state_dict(state_dict)
        return x
Example #9
def main(args):

    start_t = time.time()
    repVGGA0 = create_RepVGG_A0()
    end_t = time.time()
    print("init time : {}".format(end_t - start_t))

    start_t = time.time()
    pretrain_models = flow.load(args.model_path)
    repVGGA0.load_state_dict(pretrain_models)
    end_t = time.time()
    print("load params time : {}".format(end_t - start_t))

    repVGGA0.eval()
    repVGGA0.to("cuda")

    start_t = time.time()
    image = load_image(args.image_path)
    image = flow.Tensor(image, device=flow.device("cuda"))
    predictions = repVGGA0(image).softmax()
    predictions = predictions.numpy()
    end_t = time.time()
    print("infer time : {}".format(end_t - start_t))
    clsidx = np.argmax(predictions)
    print("predict prob: %f, class name: %s" %
          (np.max(predictions), clsidx_2_labels[clsidx]))
Example #10
def main(args):
    assert args.model in model_dict
    print("Predicting using", args.model, "...")

    start_t = time.time()
    net_module = model_dict[args.model]()
    end_t = time.time()
    print("init time : {}".format(end_t - start_t))

    start_t = time.time()
    pretrain_models = flow.load(args.model_path)
    net_module.load_state_dict(pretrain_models)
    end_t = time.time()
    print("load params time : {}".format(end_t - start_t))

    net_module.eval()
    net_module.to("cuda")

    start_t = time.time()
    image = load_image(args.image_path)
    image = flow.Tensor(image, device=flow.device("cuda"))
    predictions = net_module(image).softmax()
    predictions = predictions.numpy()
    end_t = time.time()
    print("infer time : {}".format(end_t - start_t))
    clsidx = np.argmax(predictions)
    print("predict prob: %f, class name: %s" %
          (np.max(predictions), clsidx_2_labels[clsidx]))
Example #11
def test(opt):

    model = DeepQNetwork()
    pretrain_models = flow.load("{}".format(opt.saved_path))
    model.load_state_dict(pretrain_models)
    model.eval()
    model.to("cuda")
    game_state = GameState()
    image, reward, terminal = game_state.frame_step(0)
    image = pre_processing(
        image[:game_state.SCREENWIDTH, :int(game_state.BASEY)],
        opt.image_size,
        opt.image_size,
    )
    image = flow.Tensor(image)
    image = image.to("cuda")
    state = flow.cat(tuple(image for _ in range(4))).unsqueeze(0)

    while True:
        prediction = model(state)[0]
        action = flow.argmax(prediction).numpy()[0]

        next_image, reward, terminal = game_state.frame_step(action)
        next_image = pre_processing(
            next_image[:game_state.SCREENWIDTH, :int(game_state.BASEY)],
            opt.image_size,
            opt.image_size,
        )
        next_image = flow.Tensor(next_image)
        next_image = next_image.to("cuda")
        next_state = flow.cat((state[0, 1:, :, :], next_image)).unsqueeze(0)

        state = next_state
Example #12
File: val.py Project: Oneflow-Inc/models
def main(args):

    cfg = get_config(args.config)
    logging.basicConfig(level=logging.NOTSET)
    logging.info(args.model_path)

    backbone = get_model(cfg.network,
                         dropout=0.0,
                         num_features=cfg.embedding_size).to("cuda")
    val_callback = CallBackVerification(1, 0, cfg.val_targets,
                                        cfg.ofrecord_path)

    state_dict = flow.load(args.model_path)

    new_parameters = dict()
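    # Drop "num_batches_tracked" buffers and the "fc.weight" entry, and strip the "backbone." prefix so the keys match the backbone module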
    for key, value in state_dict.items():
        if "num_batches_tracked" not in key:
            if key == "fc.weight":
                continue
            new_key = key.replace("backbone.", "")
            new_parameters[new_key] = value

    backbone.load_state_dict(new_parameters)

    infer_graph = EvalGraph(backbone, cfg)
    val_callback(1000, backbone, infer_graph)
Example #13
def main(args):

    start_t = time.time()
    model = build_model(args)
    end_t = time.time()
    print("init time : {}".format(end_t - start_t))

    start_t = time.time()
    pretrain_models = flow.load(args.model_path)
    model.load_state_dict(pretrain_models)
    end_t = time.time()
    print("load params time : {}".format(end_t - start_t))

    model.eval()
    model.to("cuda")

    start_t = time.time()
    image = load_image(args.image_path,
                       image_size=(args.image_size, args.image_size))
    image = flow.Tensor(image, device=flow.device("cuda"))
    predictions = model(image).softmax()
    predictions = predictions.numpy()
    end_t = time.time()
    print("infer time : {}".format(end_t - start_t))
    clsidx = np.argmax(predictions)
    print("predict prob: %f, class name: %s" %
          (np.max(predictions), clsidx_2_labels[clsidx]))
Example #14
 def load_model(self, checkpoint):
     chkpt = os.listdir(checkpoint)
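     # Prefer a single "model.pt" checkpoint; otherwise load "encoder.pt"/"decoder.pt", plus optional "frontend.pt" and "ctc.pt", into the matching sub-modules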
     if "model.pt" in chkpt:
         self.model.load_state_dict(
             flow.load(os.path.join(checkpoint, "model.pt")))
     elif "encoder.pt" in chkpt and "decoder.pt" in chkpt:
         self.model.encoder.load_state_dict(
             flow.load(os.path.join(checkpoint, "encoder.pt")))
         self.model.decoder.load_state_dict(
             flow.load(os.path.join(checkpoint, "decoder.pt")))
     if "frontend.pt" in chkpt:
         self.model.frontend.load_state_dict(
             flow.load(os.path.join(checkpoint, "frontend.pt")))
     if "ctc.pt" in chkpt:
         self.model.assistor.load_state_dict(
             flow.load(os.path.join(checkpoint, "ctc.pt")))
Example #15
    def train_by_oneflow():
        x = Parameter(flow.Tensor(init_value, device=flow.device(device)))
        sgd = flow.optim.SGD([{
            "params": [x],
            "lr": learning_rate,
            "momentum": momentum,
            "weight_decay": weight_decay,
        }])

        def train_one_iter(grad):
            grad_tensor = flow.tensor(
                grad,
                dtype=flow.float32,
                requires_grad=False,
                device=flow.device(device),
            )
            loss = flow.sum(x * grad_tensor)
            loss.backward()
            sgd.step()
            sgd.zero_grad()

        for i in range(train_iters):
            train_one_iter(random_grad_seq[i])
            # test state_dict/load_state_dict
            if i == reload_state_step:
                state_dict = sgd.state_dict()
                sgd = flow.optim.SGD([x])
                if save_load_by_pickle:
                    with tempfile.TemporaryDirectory() as save_dir:
                        flow.save(state_dict, save_dir)
                        state_dict = flow.load(save_dir)
                sgd.load_state_dict(state_dict)
        return x
Example #16
def load_checkpoint(
    model,
    path_to_checkpoint,
):
    """
    Load the checkpoint from the given file. If inflation is True, inflate the
    2D Conv weights from the checkpoint to 3D Conv.
    Args:
        path_to_checkpoint (string): path to the checkpoint to load.
        model (model): model to load the weights from the checkpoint.
    """
    checkpoint = flow.load(path_to_checkpoint)

    if not isinstance(checkpoint, dict):
        raise RuntimeError("No state_dict found in checkpoint file {}".format(
            path_to_checkpoint))

    # get state_dict from checkpoint
    if "state_dict" in checkpoint:
        state_dict = checkpoint["state_dict"]
    else:
        state_dict = checkpoint

    model.load_state_dict(state_dict)

    return model
Example #17
def main(args):
    flow.env.init()
    flow.enable_eager_execution()

    start_t = time.time()
    posenet_module = PoseNet()
    end_t = time.time()
    print("init time : {}".format(end_t - start_t))

    start_t = time.time()
    pretrain_models = flow.load(args.model_path)
    posenet_module.load_state_dict(pretrain_models)
    end_t = time.time()
    print("load params time : {}".format(end_t - start_t))

    posenet_module.eval()
    posenet_module.to("cuda")

    start_t = time.time()
    image = load_image(args.image_path)
    image = flow.Tensor(image, device=flow.device("cuda"))
    logits = posenet_module(image)
    predictions = logits.softmax()
    predictions = predictions.numpy()
    end_t = time.time()
    print("infer time : {}".format(end_t - start_t))
    clsidx = np.argmax(predictions)
    print("predict prob: %f, class name: %s" %
          (np.max(predictions), clsidx_2_labels[clsidx]))
Example #18
def main(args):

    start_t = time.time()
    inceptionv3_module = inception_v3()
    end_t = time.time()
    print("init time : {}".format(end_t - start_t))

    start_t = time.time()
    pretrain_models = flow.load(args.model_path)
    inceptionv3_module.load_state_dict(pretrain_models)
    end_t = time.time()
    print("load params time : {}".format(end_t - start_t))

    inceptionv3_module.eval()
    inceptionv3_module.to("cuda")

    start_t = time.time()
    image = load_image(args.image_path)
    image = flow.Tensor(image, device=flow.device("cuda"))
    predictions, aux_predictions = inceptionv3_module(image)
    predictions = predictions.softmax()
    predictions = predictions.numpy()
    end_t = time.time()
    print("infer time : {}".format(end_t - start_t))
    clsidx = np.argmax(predictions)
    print("predict prob: %f, class name: %s" %
          (np.max(predictions), clsidx_2_labels[clsidx]))
Example #19
def main(args):
    start_t = time.time()
    quantization_module = QuantizationAlexNet()
    quantization_module.quantize(
        quantization_bit=args.quantization_bit,
        quantization_scheme=args.quantization_scheme,
        quantization_formula=args.quantization_formula,
        per_layer_quantization=args.per_layer_quantization,
    )
    end_t = time.time()
    print("init time : {}".format(end_t - start_t))

    start_t = time.time()
    pretrain_models = flow.load(args.model_path)
    quantization_module.load_state_dict(pretrain_models)
    end_t = time.time()
    print("load params time : {}".format(end_t - start_t))

    quantization_module.eval()
    quantization_module.to("cuda")

    start_t = time.time()
    image = load_image(args.image_path)
    image = flow.Tensor(image, device=flow.device("cuda"))
    predictions = quantization_module(image).softmax()
    predictions = predictions.numpy()
    end_t = time.time()
    print("infer time : {}".format(end_t - start_t))
    clsidx = np.argmax(predictions)
    print("predict prob: %f, class name: %s" %
          (np.max(predictions), clsidx_2_labels[clsidx]))
Example #20
def load_params_from_lazy(eager_state_dict, lazy_model_path):
    print(f"Restroing model from {lazy_model_path}")
    lazy_state_dict = flow.load(lazy_model_path)
    all_eager_names_list = set(eager_state_dict.keys())

    # load regular weights
    for lazy_name, lazy_weight in lazy_state_dict.items():
        # skip momentum and momentum^2 for optimizer
        if lazy_name.endswith("-v") or lazy_name.endswith("-m"):
            continue
        eager_name = change_name_from_lazy_to_eager(lazy_name)
        if eager_name not in all_eager_names_list:
            print(f"{eager_name} is not matched")
            continue
        else:
            all_eager_names_list.remove(eager_name)
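            # These projection weights are stored transposed in the lazy checkpoint, so transpose them to match the eager parameter layout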
            if (("dense.weight" in eager_name)
                    or ("query.weight" in eager_name)
                    or ("value.weight" in eager_name)
                    or ("key.weight" in eager_name)):
                lazy_weight = flow.tensor(lazy_weight.numpy().transpose())
            eager_state_dict[eager_name].data.copy_(lazy_weight)

    # load embedding
    eager_state_dict["bert.embeddings.word_embeddings.weight"].data.copy_(
        lazy_state_dict["bert-embeddings-word_embeddings"])
    eager_state_dict[
        "bert.embeddings.token_type_embeddings.weight"].data.copy_(
            lazy_state_dict["bert-embeddings-token_type_embeddings"])
    eager_state_dict["bert.embeddings.position_embeddings.weight"].data.copy_(
        flow.tensor(lazy_state_dict["bert-embeddings-position_embeddings"].
                    numpy().squeeze(0)))
Example #21
def main(args):
    start_t = time.time()
    dla_module = DLA(
        num_classes=10, levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 128, 256, 512]
    )
    end_t = time.time()
    print("init time : {}".format(end_t - start_t))
    start_t = time.time()
    pretrain_models = flow.load(args.model_path)
    dla_module.load_state_dict(pretrain_models)
    end_t = time.time()
    print("load params time : {}".format(end_t - start_t))
    dla_module.eval()
    dla_module.to("cuda")
    start_t = time.time()
    image = load_image(args.image_path)
    image = flow.Tensor(image, device=flow.device("cuda"))
    logits = dla_module(image)
    predictions = logits.softmax()
    predictions = predictions.numpy()
    end_t = time.time()
    print("infer time : {}".format(end_t - start_t))
    clsidx = np.argmax(predictions)
    print(
        "predict prob: %f, class name: %s"
        % (np.max(predictions), clsidx_2_labels[clsidx])
    )
Example #22
def infer(opt):
    with open(opt.label_dict, "r") as f:
        lab_dict = json.load(f)

    cnn = simple_CNN(opt.num_speakers)
    cnn.to("cuda")

    cnn.load_state_dict(flow.load(opt.load_path))
    cnn.eval()

    label_list = lab_dict["test"]
    err_sum = 0
    for wav, label in label_list:
        inp, lab = example_precess(wav, label)
        inp = inp.unsqueeze(1)
        pout = cnn(inp)
        pred = flow.argmax(pout, dim=1)

        err = 1 if (pred + 1).numpy() != lab.long().numpy() else 0
        err_sum += err
        print(
            "wav_filename: ",
            wav,
            "    predicted speaker id: ",
            (pred + 1).numpy()[0],
            "    real speaker id: ",
            lab.long().numpy()[0],
        )
    print("accuracy: ", 1 - err_sum / 6)
Example #23
def convert_func(cfg, model_path, out_path, image_size):

    model_module = get_model(cfg.network,
                             dropout=0.0,
                             num_features=cfg.embedding_size).to("cuda")
    model_module.eval()
    print(model_module)
    model_graph = ModelGraph(model_module)
    model_graph._compile(flow.randn(1, 3, image_size, image_size).to("cuda"))

    with tempfile.TemporaryDirectory() as tmpdirname:
        new_parameters = dict()
        parameters = flow.load(model_path)
        for key, value in parameters.items():
            if "num_batches_tracked" not in key:
                if key == "fc.weight":
                    continue
                val = value
                new_key = key.replace("backbone.", "")
                new_parameters[new_key] = val
        model_module.load_state_dict(new_parameters)
        flow.save(model_module.state_dict(), tmpdirname)
        convert_to_onnx_and_check(model_graph,
                                  flow_weight_dir=tmpdirname,
                                  onnx_model_path="./",
                                  print_outlier=True)
Example #24
def main():
    transform = ST.Compose(
        [
            ST.ToNumpyForVal(),
            ST.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    global args
    args = parser.parse_args()
    model = CSRNet()
    model = model.to("cuda")
    # checkpoint = flow.load('checkpoint/Shanghai_BestModelA/shanghaiA_bestmodel')
    checkpoint = flow.load(args.modelPath)
    model.load_state_dict(checkpoint)
    img = transform(Image.open(args.picPath).convert("RGB"))
    img = flow.Tensor(img)
    img = img.to("cuda")
    output = model(img.unsqueeze(0))
    print("Predicted Count : ", int(output.detach().to("cpu").sum().numpy()))
    temp = output.view(output.shape[2], output.shape[3])
    temp = temp.numpy()
    plt.title("Predicted Count")
    plt.imshow(temp, cmap=c.jet)
    plt.show()
    temp = h5py.File(args.picDensity, "r")
    temp_1 = np.asarray(temp["density"])
    plt.title("Original Count")
    plt.imshow(temp_1, cmap=c.jet)
    print("Original Count : ", int(np.sum(temp_1)) + 1)
    plt.show()
    print("Original Image")
    plt.title("Original Image")
    plt.imshow(plt.imread(args.picPath))
    plt.show()
Example #25
def main(args):

    device = flow.device("cpu") if args.no_cuda else flow.device("cuda")
    with open(args.config_path, "r") as f:
        config = json.load(f)
    with open(args.vocab_path, "rb") as f:
        vocab = pickle.load(f)
    textcnn = textCNN(
        word_emb_dim=config["word_emb_dim"],
        vocab_size=len(vocab),
        dim_channel=config["dim_channel"],
        kernel_wins=config["kernel_wins"],
        dropout_rate=config["dropout_rate"],
        num_class=config["num_class"],
        max_seq_len=config["max_seq_len"],
    )
    textcnn.load_state_dict(flow.load(args.model_path))
    textcnn.eval()
    textcnn.to(device)
    text = utils.clean_str(args.text)
    text = [utils.tokenizer(text)]
    input = flow.tensor(np.array(utils.tensorize_data(text, vocab,
                                                      max_len=200)),
                        dtype=flow.long).to(device)
    predictions = textcnn(input).softmax()
    predictions = predictions.numpy()
    clsidx = np.argmax(predictions)
    print("predict prob: %f, class name: %s" % (np.max(predictions), clsidx))
Example #26
def main(args):
    start_t = time.perf_counter()

    print("***** Model Init *****")
    model = resnet50()
    model.load_state_dict(flow.load(args.model_path))
    model = model.to("cuda")
    model.eval()
    end_t = time.perf_counter()
    print(f"***** Model Init Finish, time escapled {end_t - start_t:.6f} s *****")

    if args.graph:
        model_graph = InferGraph(model)

    start_t = end_t
    image = load_image(args.image_path)
    image = flow.Tensor(image, device=flow.device("cuda"))
    if args.graph:
        pred = model_graph(image)
    else:
        pred = model(image).softmax()

    pred = pred.numpy()
    prob = np.max(pred)
    clsidx = np.argmax(pred)
    cls = clsidx_2_labels[clsidx]

    end_t = time.perf_counter()
    print(
        "predict image ({}) prob: {:.5f}, class name: {}, time escapled: {:.6f} s".format(
            os.path.basename(args.image_path), prob, cls, end_t - start_t
        )
    )
Example #27
    def __init__(self, load_weights=False):
        super(CSRNet, self).__init__()
        self.seen = 0
        self.frontend_feat = [
            64,
            64,
            "M",
            128,
            128,
            "M",
            256,
            256,
            256,
            "M",
            512,
            512,
            512,
        ]
        self.backend_feat = [512, 512, 512, 256, 128, 64]
        self.frontend = make_layers(self.frontend_feat)
        self.backend = make_layers(self.backend_feat,
                                   in_channels=512,
                                   dilation=True)
        self.output_layer = nn.Conv2d(64, 1, kernel_size=1)

        if not load_weights:
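            # No CSRNet checkpoint provided: initialize with pretrained VGG-16 weights, copying them into the frontend layer by layer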
            mod = vgg16(pretrained=True)
            pretrain_models = flow.load(
                "vgg_imagenet_pretrain_model/vgg16_oneflow_model")
            mod.load_state_dict(pretrain_models)
            self._initialize_weights()
            for i in range(len(self.frontend.state_dict().items())):
                src = list(mod.state_dict().items())[i][1]
                dst = list(self.frontend.state_dict().items())[i][1].copy_(src)
Example #28
def main(args):
    if not os.path.exists(args.save_path):
        os.mkdir(args.save_path)

    net = UNet(n_channels=3, n_classes=1)

    checkpoint = flow.load(args.pretrained_path)
    net.load_state_dict(checkpoint)

    net.to("cuda")

    x_test_dir, y_test_dir = get_datadir_path(args, split="test")

    test_dataset = Dataset(
        x_test_dir, y_test_dir, augmentation=get_test_augmentation(),
    )

    print("Begin Testing...")
    for i, (image, mask) in enumerate(tqdm(test_dataset)):
        show_image = image
        with flow.no_grad():
            image = image / 255.0
            image = image.astype(np.float32)
            image = flow.tensor(image, dtype=flow.float32)
            image = image.permute(2, 0, 1)
            image = image.to("cuda")

            pred = net(image.unsqueeze(0).to("cuda"))
            pred = pred.numpy()
            pred = pred > 0.5
        save_picture_name = os.path.join(args.save_path, "test_image_" + str(i))
        visualize(
            save_picture_name, image=show_image, GT=mask[0, :, :], Pred=pred[0, 0, :, :]
        )
Example #29
def recognize(args):
    # model
    char_list, sos_id, eos_id = process_dict(args.dict)
    vocab_size = len(char_list)
    encoder = Encoder(
        args.d_input * args.LFR_m,
        args.n_layers_enc,
        args.n_head,
        args.d_k,
        args.d_v,
        args.d_model,
        args.d_inner,
        dropout=args.dropout,
        pe_maxlen=args.pe_maxlen,
    )
    decoder = Decoder(
        sos_id,
        eos_id,
        vocab_size,
        args.d_word_vec,
        args.n_layers_dec,
        args.n_head,
        args.d_k,
        args.d_v,
        args.d_model,
        args.d_inner,
        dropout=args.dropout,
        tgt_emb_prj_weight_sharing=args.tgt_emb_prj_weight_sharing,
        pe_maxlen=args.pe_maxlen,
    )
    model = Transformer(encoder, decoder)
    model.load_state_dict(flow.load(args.model_path))
    device = flow.device("cuda")
    model.eval()
    model.to(device)
    LFR_m = args.LFR_m
    LFR_n = args.LFR_n
    char_list, sos_id, eos_id = process_dict(args.dict)
    assert model.decoder.sos_id == sos_id and model.decoder.eos_id == eos_id

    # read json data
    with open(args.recog_json, "rb") as f:
        js = json.load(f)["utts"]

    # decode each utterance
    new_js = {}
    with flow.no_grad():
        for idx, name in enumerate(js.keys(), 1):
            print("(%d/%d) decoding %s" % (idx, len(js.keys()), name), flush=True)
            input = kaldi_io.read_mat(js[name]["input"][0]["feat"])
            input = build_LFR_features(input, LFR_m, LFR_n)
            input = flow.tensor(input).to(dtype=flow.float32)
            input_length = flow.tensor([input.size(0)], dtype=flow.int64)
            input = input.to(device)
            input_length = input_length.to(device)
            nbest_hyps = model.recognize(input, input_length, char_list, args)
            new_js[name] = add_results_to_json(js[name], nbest_hyps, char_list)

    with open(args.result_label, "wb") as f:
        f.write(json.dumps({"utts": new_js}, indent=4, sort_keys=True).encode("utf_8"))
Example #30
def main(args):
    test_x, test_y = load_image(args.image_path)

    test_inp = to_tensor(test_x.astype(np.float32))
    test_target = to_tensor(test_y.astype(np.float32))

    generator = Generator().to("cuda")

    start_t = time.time()
    pretrain_model = flow.load(args.model_path)
    generator.load_state_dict(pretrain_model)
    end_t = time.time()
    print("load params time : {}".format(end_t - start_t))

    start_t = time.time()
    generator.eval()
    with flow.no_grad():
        gout = to_numpy(generator(test_inp), False)
    end_t = time.time()
    print("infer time : {}".format(end_t - start_t))

    # save images
    save_images(
        gout,
        test_inp.numpy(),
        test_target.numpy(),
        path=os.path.join("./testimage.png"),
        plot_size=1,
    )