def load_CNN_model(saveDir):
    """Restore the trained ResNet binary classifier from *saveDir*.

    Loads the ``best_ckpt`` checkpoint file, moves the network to the GPU
    when CUDA is available, and switches it to evaluation mode.

    Args:
        saveDir: directory containing the ``best_ckpt`` checkpoint file.

    Returns:
        tuple: ``(net, use_cuda)`` — the ready-to-use model and a bool
        telling whether it was placed on the GPU.
    """
    net = resnet.resnet(in_channel=3, num_classes=1)
    print("Loading model", saveDir)
    ckpt_path = os.path.join(saveDir, "best_ckpt")
    use_cuda = torch.cuda.is_available()
    # Without CUDA, remap GPU-saved tensors onto the CPU at load time.
    ckpt = torch.load(ckpt_path) if use_cuda else torch.load(ckpt_path, map_location='cpu')
    net.load_state_dict(ckpt['model_state_dict'])
    if use_cuda:
        net.cuda()
    net.eval()
    return net, use_cuda
def train_and_save_model():
    """Build the ResNet classifier and train it for 20 epochs.

    Wraps the project train/test sets in DataLoaders (training shuffled,
    testing not) and delegates the loop to ``utils.train`` with an Adam
    optimizer and binary cross-entropy loss.
    """
    train_set, test_set = data.get_train_test_set()
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=BATCH_SIZE, shuffle=True)
    test_loader = torch.utils.data.DataLoader(
        test_set, batch_size=BATCH_SIZE, shuffle=False)

    net = resnet.resnet(in_channel=3, num_classes=1)
    utils.train(
        net,
        train_loader,
        test_loader,
        20,
        torch.optim.Adam(net.parameters(), lr=LR),
        nn.BCELoss(),
        debug=DEBUG,
    )
# Configure the MindSpore execution context before any graph is built.
context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target,
                    device_id=args.device_id)

if __name__ == '__main__':
    # Select the (config, network-constructor) pair for the requested
    # network/dataset combination; imports are deliberately conditional so
    # only the chosen backbone module is loaded.
    if args.network_dataset == 'resnet50_cifar10':
        from src.config import config1 as config
        from src.resnet import resnet50 as resnet
    elif args.network_dataset == 'resnet50_imagenet2012':
        from src.config import config2 as config
        from src.resnet import resnet50 as resnet
    elif args.network_dataset == 'resnet101_imagenet2012':
        from src.config import config3 as config
        from src.resnet import resnet101 as resnet
    elif args.network_dataset == 'se-resnet50_imagenet2012':
        from src.config import config4 as config
        from src.resnet import se_resnet50 as resnet
    else:
        # Fixed the grammar of the original message ("is not support.").
        raise ValueError("network and dataset are not supported.")

    net = resnet(config.class_num)

    # A checkpoint is mandatory for export; fail fast with a clear message.
    assert args.ckpt_file is not None, "checkpoint_path is None."
    param_dict = load_checkpoint(args.ckpt_file)
    load_param_into_net(net, param_dict)

    # Trace the network with a zero tensor of the requested input geometry
    # and serialize it in the chosen file format.
    input_arr = Tensor(np.zeros([args.batch_size, 3, args.height, args.width], np.float32))
    export(net, input_arr, file_name=args.file_name, file_format=args.file_format)
# Configure MindSpore; data sinking is only usable off-CPU.
context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
dataset_sink_mode = not args.device_target == "CPU"

# Create the network FIRST.  The original code tried to load a pretrained
# checkpoint into `net` before `net` was defined, which raised a NameError
# whenever --pre_trained was supplied.
net = resnet(class_num=10)

# Optionally warm-start from a pretrained checkpoint.
if args.pre_trained:
    param_dict = load_checkpoint(args.pre_trained)
    load_param_into_net(net, param_dict)

# Loss: softmax cross-entropy over sparse (integer) labels, mean-reduced.
loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")

# Optimizer: SGD with momentum over trainable parameters only.
lr = 0.01
momentum = 0.9
opt = nn.Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, momentum)

batch_num = 128

# CheckPoint CallBack definition: save every `batch_num` steps, keep at
# most 35 checkpoint files on disk.
config_ck = CheckpointConfig(save_checkpoint_steps=batch_num, keep_checkpoint_max=35)
ckpoint_cb = ModelCheckpoint(prefix="train_resnet_cifar10", directory="./", config=config_ck)

train_epoch = 90
cifar_path = "./CIFAR-10"
dataset_size = 1
model = Model(net, loss, opt, metrics={"Accuracy": Accuracy()})
type=str, default="CPU", choices=['Ascend', 'GPU', 'CPU'],
help='device where the code will be implemented (default: CPU)')
parser.add_argument('--checkpoint_path', type=str, default=None,
                    help='Pretrained checkpoint path')
args = parser.parse_args()

# Run in graph mode on the requested backend.
context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
# Data sinking is only available on device targets other than CPU.
dataset_sink_mode = not args.device_target == "CPU"

# define net
net = resnet(
    class_num=10
)
#if you wish to consider other module you can pass also this argument
# ckpoint = args.checkpoint_path

# define loss, model
loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

#define cifar-10 path
cifar_path = "./CIFAR-10"

# define model — no optimizer is passed, so this Model is evaluation-only.
model = Model(net, loss_fn=loss, metrics={"Accuracy": Accuracy()})

# eval model — test_net presumably restores args.checkpoint_path into the
# net before evaluating; verify against its definition.
test_net(net, model, cifar_path, args.checkpoint_path)

# # config for resent50, cifar10
# config1 = ed({
#     "class_num": 10,
#     "batch_size": 32,
device_target=args_opt.device_target) if args_opt.device_target == "Ascend": context.set_context(device_id=args_opt.device_id) if __name__ == '__main__': if args_opt.dataset_name == 'cifar10': width_multiplier = 1 cifar_stem = True projection_dimension = 128 image_height = 32 image_width = 32 else: raise ValueError("dataset is not support.") base_net = resnet(1, width_multiplier=width_multiplier, cifar_stem=cifar_stem) net = SimCLR(base_net, projection_dimension, base_net.end_point.in_channels) param_dict = load_checkpoint(args_opt.ckpt_file) load_param_into_net(net, param_dict) input_arr = Tensor( np.zeros([args_opt.batch_size, 3, image_height, image_width]), ms.float32) export(net, input_arr, file_name=args_opt.file_name, file_format=args_opt.file_format)
class NetWithLossCell(nn.Cell):
    """Wraps the SimCLR backbone together with its contrastive loss.

    The backbone returns four outputs; only the last two (the paired
    projection outputs) are fed to the loss function.
    """

    def __init__(self, backbone, loss_fn):
        super(NetWithLossCell, self).__init__(auto_prefix=False)
        self._backbone = backbone
        self._loss_fn = loss_fn

    def construct(self, data_x, data_y, label):
        # `label` is accepted for dataset-column compatibility but unused:
        # the loss is computed purely from the paired projections.
        _, _, x_pred, y_pred = self._backbone(data_x, data_y)
        return self._loss_fn(x_pred, y_pred)


if __name__ == "__main__":
    # NOTE(review): "train_endcoder" spelling kept as-is — the dataset
    # factory presumably matches on this exact string; verify before fixing.
    dataset = create_dataset(args, dataset_mode="train_endcoder")
    # Net.
    base_net = resnet(1, args.width_multiplier, cifar_stem=args.dataset_name == "cifar10")
    net = SimCLR(base_net, args.projection_dimension, base_net.end_point.in_channels)
    # init weight: prefer a pretrained checkpoint when supplied (copied
    # from cloud storage when running on cloudbrain), otherwise fall
    # through to the per-cell initialization below.
    if args.pre_trained_path:
        if args.run_cloudbrain:
            mox.file.copy_parallel(src_url=args.pre_trained_path,
                                   dst_url=local_data_url + '/pre_train.ckpt')
            param_dict = load_checkpoint(local_data_url + '/pre_train.ckpt')
        else:
            param_dict = load_checkpoint(args.pre_trained_path)
        load_param_into_net(net, param_dict)
    else:
        for _, cell in net.cells_and_names():
            if isinstance(cell, nn.Conv2d):