Example #1
0
        "e2e_dump_settings": {
            "enable": True,
            "trans_flag": False
        }
    }
    with open("./data_dump.json", "w", encoding="GBK") as f:
        json.dump(data_dump, f)
    os.environ['MINDSPORE_DUMP_CONFIG'] = abspath + "/data_dump.json"

def set_log_info():
    """Point MindSpore's GLOG-based logging at stderr, set rotation limits,
    and print the resulting log configuration."""
    log_settings = {
        'GLOG_v': '1',                  # INFO-level verbosity
        'GLOG_logtostderr': '1',        # log to stderr instead of files
        'logger_maxBytes': '5242880',   # 5 MiB per rotated log file
        'GLOG_log_dir': 'D:/' if os.name == "nt" else '/var/log/mindspore',
        'logger_backupCount': '10',     # keep up to 10 rotated files
    }
    os.environ.update(log_settings)
    print(logger.get_log_config())

if __name__ == "__main__":
    set_dump_info()
    set_log_info()
    context.set_context(mode=context.GRAPH_MODE)
    train_dataset = create_train_dataset()
    eval_dataset = create_eval_dataset()
    net = Net()
    net_opt = Momentum(net.trainable_params(), 0.01, 0.9)
    net_loss = SoftmaxCrossEntropyWithLogits(reduction='mean')
    model = Model(network=net, loss_fn=net_loss, optimizer=net_opt, metrics={'Accuracy': nn.Accuracy()})
    model.train(epoch=100,
                train_dataset=train_dataset,
                callbacks=[LossMonitor(), StopAtTime(3), SaveCallback(model, eval_dataset)])
Example #2
0
                    help="set the eps of fgsm")
args, _ = parser.parse_known_args()

# Containers for the evaluation sweep: per-batch input images, ground-truth
# labels, and the model's argmax predictions (concatenated below).
# NOTE(review): the original also pre-initialized `images = []` and
# `labels = []` here, then immediately clobbered both names inside the
# loop — those dead initializations were removed.
test_images = []
test_labels = []
predict_labels = []

# Restore the trained LeNet-5 weights and wrap the net for inference.
net = LeNet5()
mnist_path = "./datasets/MNIST_Data/"
param_dict = load_checkpoint("checkpoint_lenet-1_1875.ckpt")
load_param_into_net(net, param_dict)
net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
net_opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
model = Model(net, net_loss, net_opt, metrics={"Accuracy": nn.Accuracy()})

# Iterate the test split as numpy dicts so batches can be stacked directly.
ds_test = create_dataset(os.path.join(mnist_path, "test"),
                         batch_size=32).create_dict_iterator(output_numpy=True)

for data in ds_test:
    images = data['image'].astype(np.float32)
    labels = data['label']
    test_images.append(images)
    test_labels.append(labels)
    # Class index with the highest logit per sample in this batch.
    pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(), axis=1)
    predict_labels.append(pred_labels)

# Flatten the per-batch lists into single arrays over the whole test set.
test_images = np.concatenate(test_images)
predict_labels = np.concatenate(predict_labels)
true_labels = np.concatenate(test_labels)
Example #3
0
def set_log_info():
    """Configure MindSpore log verbosity, destination, and rotation via
    environment variables, then echo the effective configuration."""
    # Windows has no /var/log; fall back to the D: drive root there.
    default_dir = 'D:/' if os.name == "nt" else '/var/log/mindspore'
    for key, value in (('GLOG_v', '1'),
                       ('GLOG_logtostderr', '1'),
                       ('logger_maxBytes', '5242880'),
                       ('GLOG_log_dir', default_dir),
                       ('logger_backupCount', '10')):
        os.environ[key] = value
    print(logger.get_log_config())


if __name__ == "__main__":
    set_dump_info()
    set_log_info()
    context.set_context(mode=context.GRAPH_MODE)
    train_dataset = create_train_dataset()
    eval_dataset = create_eval_dataset()
    net = Net()
    net_opt = Momentum(net.trainable_params(), 0.01, 0.9)
    net_loss = SoftmaxCrossEntropyWithLogits(reduction='mean')
    model = Model(network=net,
                  loss_fn=net_loss,
                  optimizer=net_opt,
                  metrics={'Accuracy': nn.Accuracy()})
    model.train(epoch=100,
                train_dataset=train_dataset,
                callbacks=[
                    LossMonitor(),
                    StopAtTime(3),
                    SaveCallback(model, eval_dataset)
                ])
Example #4
0
"""
import mindspore.nn as nn
from mindspore.nn import Momentum, SoftmaxCrossEntropyWithLogits
from mindspore import Model, context
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor

from src.dataset import create_train_dataset, create_eval_dataset
from src.net import Net


if __name__ == "__main__":
    context.set_context(mode=context.GRAPH_MODE)
    ds_train = create_train_dataset()
    ds_eval = create_eval_dataset()
    net = Net()
    net_opt = Momentum(net.trainable_params(), 0.01, 0.9)
    net_loss = SoftmaxCrossEntropyWithLogits(reduction='mean')
    metrics = {
        'Accuracy': nn.Accuracy(),
        'Loss': nn.Loss(),
        'Precision': nn.Precision(),
        'Recall': nn.Recall(),
        'F1_score': nn.F1()
    }
    config_ck = CheckpointConfig(save_checkpoint_steps=1000, keep_checkpoint_max=10)
    ckpoint = ModelCheckpoint(prefix="CKPT", config=config_ck)
    model = Model(network=net, loss_fn=net_loss, optimizer=net_opt, metrics=metrics)
    model.train(epoch=2, train_dataset=ds_train, callbacks=[ckpoint, LossMonitor()])
    result = model.eval(ds_eval)
    print(result)