Code example #1
import numpy as np
import mindspore
import mindspore.dataset.vision.c_transforms as C


def sync_random_Horizontal_Flip(input_images, target_images):
    '''
    Randomly flip the input images and the target images identically.
    '''
    # Draw one seed and set it before each op so both flips make the same choice.
    seed = np.random.randint(0, 2000000000)
    mindspore.set_seed(seed)
    op = C.RandomHorizontalFlip(prob=0.5)
    out_input = op(input_images)
    # Re-seed so the second op replays the identical random decision.
    mindspore.set_seed(seed)
    op = C.RandomHorizontalFlip(prob=0.5)
    out_target = op(target_images)
    return out_input, out_target
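A minimal usage sketch for the helper above; the random uint8 arrays are stand-ins for a real image and its pixel-aligned target:

if __name__ == '__main__':
    # Dummy paired data: an image and a target that must flip together.
    img = np.random.randint(0, 256, (64, 64, 3)).astype(np.uint8)
    target = np.random.randint(0, 256, (64, 64, 3)).astype(np.uint8)
    out_img, out_target = sync_random_Horizontal_Flip(img, target)
    # Both outputs were flipped (or left untouched) by the same coin flip.
    print(out_img.shape, out_target.shape)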
Code example #2
    def test_char_encoder(self):
        # Assumes module-level imports: numpy as np, mindspore, and
        # mindspore's context, Tensor and nn.SampledSoftmaxLoss.
        context.set_context(mode=context.PYNATIVE_MODE, device_target='Ascend')
        # Fix the global seed so the sampled negatives are reproducible.
        mindspore.set_seed(1)
        weights = Tensor(np.random.randint(0, 9, [7, 10]), mindspore.float32)
        biases = Tensor(np.random.randint(0, 9, [7]), mindspore.float32)
        labels = Tensor([0, 1, 2])
        inputs = Tensor(np.random.randint(0, 9, [3, 10]), mindspore.float32)

        loss = SampledSoftmaxLoss(num_sampled=4,
                                  num_classes=7,
                                  num_true=1,
                                  seed=1)
        output = loss(weights, biases, labels, inputs)

        assert output.shape == labels.shape
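With reduction left at its default of 'none', SampledSoftmaxLoss returns one loss value per example, which is why the assertion above expects output.shape to equal labels.shape.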
Code example #3
File: train.py Project: zhangjinrong/mindspore
import os

import mindspore
from mindspore import context
from mindspore.communication.management import init, get_group_size
from mindspore.context import ParallelMode
from mindspore.train.serialization import load_checkpoint, load_param_into_net

from src.unet import UNet
from src.data_loader import create_dataset
from src.loss import CrossEntropyWithLogits
from src.utils import StepLossTimeMonitor
from src.config import cfg_unet

device_id = int(os.getenv('DEVICE_ID', '0'))
context.set_context(mode=context.GRAPH_MODE,
                    device_target="Ascend",
                    save_graphs=False,
                    device_id=device_id)

mindspore.set_seed(1)


def train_net(data_dir,
              cross_valid_ind=1,
              epochs=400,
              batch_size=16,
              lr=0.0001,
              run_distribute=False,
              cfg=None):

    if run_distribute:
        init()
        group_size = get_group_size()
        parallel_mode = ParallelMode.DATA_PARALLEL
        context.set_auto_parallel_context(parallel_mode=parallel_mode,
                                          device_num=group_size,
                                          gradients_mean=True)
Code example #4
def test_resnet50_quant():
    """ResNet-50 quantization-aware training smoke test.

    Assumes module-level imports (np, set_seed, context, Tensor, Model,
    Momentum, quant, resnet50_quant, CrossEntropy, create_dataset, get_lr,
    Monitor, config_quant) and a module-level dataset_path.
    """
    set_seed(1)
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    config = config_quant
    print("training configure: {}".format(config))
    epoch_size = config.epoch_size

    # define network
    net = resnet50_quant(class_num=config.class_num)
    net.set_train(True)

    # define loss
    if not config.use_label_smooth:
        config.label_smooth_factor = 0.0
    loss = CrossEntropy(smooth_factor=config.label_smooth_factor,
                        num_classes=config.class_num)
    #loss_scale = FixedLossScaleManager(config.loss_scale, drop_overflow_update=False)

    # define dataset
    dataset = create_dataset(dataset_path=dataset_path,
                             config=config,
                             repeat_num=1,
                             batch_size=config.batch_size)
    step_size = dataset.get_dataset_size()

    # convert fusion network to quantization aware network
    net = quant.convert_quant_network(net,
                                      bn_fold=True,
                                      per_channel=[True, False],
                                      symmetric=[True, False])

    # get learning rate
    lr = Tensor(
        get_lr(lr_init=config.lr_init,
               lr_end=0.0,
               lr_max=config.lr_max,
               warmup_epochs=config.warmup_epochs,
               total_epochs=config.epoch_size,
               steps_per_epoch=step_size,
               lr_decay_mode='cosine'))

    # define optimizer
    opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr,
                   config.momentum, config.weight_decay, config.loss_scale)

    # define model
    #model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics={'acc'})
    model = Model(net, loss_fn=loss, optimizer=opt)

    print("============== Starting Training ==============")
    monitor = Monitor(lr_init=lr.asnumpy(),
                      step_threshold=config.step_threshold)

    callbacks = [monitor]
    model.train(epoch_size,
                dataset,
                callbacks=callbacks,
                dataset_sink_mode=False)
    print("============== End Training ==============")

    expect_avg_step_loss = 2.40
    avg_step_loss = np.mean(np.array(monitor.losses))

    print("average step loss:{}".format(avg_step_loss))
    assert avg_step_loss < expect_avg_step_loss
Code example #5
                         eval_ckpt_step=args_opt.eval_ckpt_step,
                         save_ckpt_dir=save_ckpt_dir,
                         embedding_bits=student_net_cfg.embedding_bits,
                         weight_bits=student_net_cfg.weight_bits,
                         clip_value=student_net_cfg.weight_clip_value,
                         metrics=task.metrics)
        ]
    else:
        callback = [
            StepCallBack(),
            ModelSaveCkpt(network=netwithloss.bert,
                          save_ckpt_step=args_opt.save_ckpt_step,
                          max_ckpt_num=args_opt.max_ckpt_num,
                          output_dir=save_ckpt_dir,
                          embedding_bits=student_net_cfg.embedding_bits,
                          weight_bits=student_net_cfg.weight_bits,
                          clip_value=student_net_cfg.weight_clip_value)
        ]
    model = Model(netwithgrads)
    model.train(repeat_count,
                train_dataset,
                callbacks=callback,
                dataset_sink_mode=(args_opt.enable_data_sink == 'true'),
                sink_size=args_opt.data_sink_steps)


if __name__ == '__main__':
    args = parse_args()
    set_seed(args.seed)
    run_task_distill(args)
Code example #6
    def learn(self, data, columns=None, **kwargs):
        """Set up and run the Gran-DAG algorithm

        Parameters
        ----------
        data: numpy.ndarray or Tensor
            include Tensor.data
        columns : Index or array-like
            Column labels to use for resulting tensor. Will default to
            RangeIndex (0, 1, 2, ..., n) if no column labels are provided.
        """

        # Control as much randomness as possible
        set_seed(self.random_seed)

        # set context for running environment
        context.set_context(mode=context.PYNATIVE_MODE)
        devices_list = ['CPU', 'GPU', 'Ascend']
        if self.device_type.lower() == 'ascend':
            self.device_type = 'Ascend'
        else:
            self.device_type = self.device_type.upper()
        if self.device_type not in devices_list:
            raise ValueError("Only support 'CPU', 'GPU' and 'Ascend'.")
        context.set_context(device_target=self.device_type,
                            device_id=self.device_ids)

        # create learning model and ground truth model
        data = Tensor(data, columns=columns)

        if data.shape[1] != self.input_dim:
            raise ValueError("The number of variables is `{}`, "
                             "the param input_dim is `{}`, "
                             "they must be consistent.".format(
                                 data.shape[1], self.input_dim))

        if self.model_name == "NonLinGauss":
            self.model = NonlinearGauss(input_dim=self.input_dim,
                                        hidden_num=self.hidden_num,
                                        hidden_dim=self.hidden_dim,
                                        output_dim=self.OUT_DIM,
                                        mu=self.mu,
                                        lamb=self.lamb,
                                        nonlinear=self.nonlinear,
                                        norm_prod=self.norm_prod,
                                        square_prod=self.square_prod)
        elif self.model_name == "NonLinGaussANM":
            self.model = NonlinearGaussANM(input_dim=self.input_dim,
                                           hidden_num=self.hidden_num,
                                           hidden_dim=self.hidden_dim,
                                           output_dim=self.OUT_DIM_ANM,
                                           mu=self.mu,
                                           lamb=self.lamb,
                                           nonlinear=self.nonlinear,
                                           norm_prod=self.norm_prod,
                                           square_prod=self.square_prod)
        else:
            raise ValueError(
                "model has to be in {NonLinGauss, NonLinGaussANM}")

        # create NormalizationData
        train_data = NormalizationData(data,
                                       train=True,
                                       normalize=self.normalize)
        test_data = NormalizationData(data,
                                      train=False,
                                      normalize=self.normalize,
                                      mean=train_data.mean,
                                      std=train_data.std)

        # apply preliminary neighborhood selection if input_dim > 50
        if self.use_pns:
            if self.num_neighbors is None:
                num_neighbors = self.input_dim
            else:
                num_neighbors = self.num_neighbors

            self.model = neighbors_selection(model=self.model,
                                             all_samples=data,
                                             num_neighbors=num_neighbors,
                                             thresh=self.pns_thresh)

        # update self.model by train
        self._train(train_data=train_data, test_data=test_data)

        # get DAG by run _to_dag
        self._to_dag(train_data)

        self._causal_matrix = Tensor(self.model.adjacency.asnumpy(),
                                     index=data.columns,
                                     columns=data.columns)
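A hypothetical usage sketch: the GranDAG class name, the causal_matrix property, and the synthetic data below are assumptions for illustration, not part of the source.

import numpy as np

# Hypothetical: assumes a GranDAG learner class exposing the learn() method
# above and a causal_matrix property backed by self._causal_matrix.
X = np.random.randn(1000, 4)
learner = GranDAG(input_dim=X.shape[1])
learner.learn(X)
print(learner.causal_matrix)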
Code example #7
def train_net(distribute, imagenet, epochs):
    """Train the network.

    Assumes module-level imports (os, mindspore.dataset as ds, context, init,
    get_rank, get_group_size, ParallelMode, load_checkpoint, load_param_into_net)
    and project helpers (ImgData, data, bicubic, IPT, init_weights, Trainer, args).
    """
    set_seed(1)
    device_id = int(os.getenv('DEVICE_ID', '0'))
    context.set_context(mode=context.GRAPH_MODE,
                        device_target="Ascend",
                        save_graphs=False,
                        device_id=device_id)

    if imagenet == 1:
        train_dataset = ImgData(args)
    else:
        train_dataset = data.Data(args).loader_train

    if distribute:
        init()
        rank_id = get_rank()
        rank_size = get_group_size()
        parallel_mode = ParallelMode.DATA_PARALLEL
        context.set_auto_parallel_context(parallel_mode=parallel_mode,
                                          device_num=rank_size,
                                          gradients_mean=True)
        print('Rank {}, rank_size {}'.format(rank_id, rank_size))
        if imagenet == 1:
            train_de_dataset = ds.GeneratorDataset(
                train_dataset,
                ["HR", "Rain", "LRx2", "LRx3", "LRx4", "scales", "filename"],
                num_shards=rank_size,
                shard_id=rank_id,
                shuffle=True)
        else:
            train_de_dataset = ds.GeneratorDataset(train_dataset, ["LR", "HR"],
                                                   num_shards=rank_size,
                                                   shard_id=rank_id,
                                                   shuffle=True)
    else:
        if imagenet == 1:
            train_de_dataset = ds.GeneratorDataset(
                train_dataset,
                ["HR", "Rain", "LRx2", "LRx3", "LRx4", "scales", "filename"],
                shuffle=True)
        else:
            train_de_dataset = ds.GeneratorDataset(train_dataset, ["LR", "HR"],
                                                   shuffle=True)

    resize_fuc = bicubic()
    train_de_dataset = train_de_dataset.project(
        columns=["HR", "Rain", "LRx2", "LRx3", "LRx4", "filename"])
    train_de_dataset = train_de_dataset.batch(
        args.batch_size,
        input_columns=["HR", "Rain", "LRx2", "LRx3", "LRx4", "filename"],
        output_columns=["LR", "HR", "idx", "filename"],
        drop_remainder=True,
        per_batch_map=resize_fuc.forward)
    train_loader = train_de_dataset.create_dict_iterator(output_numpy=True)

    net_work = IPT(args)
    init_weights(net_work, init_type='he', init_gain=1.0)
    print("Init net weight successfully")
    if args.pth_path:
        param_dict = load_checkpoint(args.pth_path)
        load_param_into_net(net_work, param_dict)
        print("Load net weight successfully")

    train_func = Trainer(args, train_loader, net_work)
    for epoch in range(0, epochs):
        train_func.update_learning_rate(epoch)
        train_func.train()
Code example #8
import numpy as np

import mindspore as ms
from mindspore import context, Tensor
from mindspore.profiler import Profiler
from mindspore.train.callback import SummaryCollector

from airnetpack.model import SchNet, AirNet, MolCalculator
from airnetpack.cutoff import MollifierCutoff
from airnetpack.acsf import LogGaussianDistribution
from airnetpack.activations import ShiftedSoftplus, Swish
from airnetpack.cutoff import CosineCutoff, SmoothCutoff
from airnetpack.train import SquareLoss, AbsLoss, ForceAbsLoss, MLoss
from airnetpack.train import WithForceLossCell, WithForceEvalCell
from airnetpack.train import Recorder, MAE, MSE

if __name__ == '__main__':

    # np.set_printoptions(threshold=np.inf)
    seed = 1111
    ms.set_seed(seed)

    summary_collector = SummaryCollector(summary_dir='./summary_dir')

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    # context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")

    # profiler = Profiler()

    mol_name = 'ethanol'
    train_file = './' + mol_name + '_train_1024.npz'
    valid_file = './' + mol_name + '_valid_128.npz'
    train_data = np.load(train_file)
    valid_data = np.load(valid_file)

    atomic_numbers = Tensor(train_data['z'], ms.int32)