Example #1
    def __init__(self, path, model, model_copy, img_save_path):
        self.path = path
        self.model = model
        self.model_copy = model_copy
        self.img_save_path = img_save_path
        # Device to use
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        # Network
        self.net = unet.UNet().to(self.device)
        # Optimizer: Adam is used here so training runs a bit faster
        self.opt = torch.optim.Adam(self.net.parameters())
        # Binary cross-entropy is used directly here; the results may not be great
        # Other losses such as DiceLoss or FocalLoss could be used instead (see the sketch after this example)
        self.loss_func = nn.BCELoss()
        # On better hardware, batch_size and num_workers can be increased
        self.loader = DataLoader(dataset.Datasets(path),
                                 batch_size=1,
                                 shuffle=True,
                                 num_workers=0)

        # Check whether a saved model exists
        if os.path.exists(self.model):
            self.net.load_state_dict(torch.load(model))
            print(f"Loaded{model}!")
        else:
            print("No Param!")
        os.makedirs(img_save_path, exist_ok=True)
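The comment above suggests DiceLoss as an alternative to nn.BCELoss. Below is a minimal sketch of a soft Dice loss in PyTorch, assuming the network outputs probabilities in [0, 1]; the class name and smoothing constant are illustrative, not part of the original code.

import torch.nn as nn

class DiceLoss(nn.Module):
    def __init__(self, smooth=1.0):
        super().__init__()
        self.smooth = smooth

    def forward(self, pred, target):
        # Flatten each sample and compute the soft Dice coefficient per sample.
        pred = pred.reshape(pred.size(0), -1)
        target = target.reshape(target.size(0), -1)
        intersection = (pred * target).sum(dim=1)
        dice = (2.0 * intersection + self.smooth) / (
            pred.sum(dim=1) + target.sum(dim=1) + self.smooth)
        # The loss is one minus the mean Dice coefficient over the batch.
        return 1.0 - dice.mean()

It could be swapped in by replacing self.loss_func = nn.BCELoss() with self.loss_func = DiceLoss().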
Example #2
    def __init__(self, path, model, model_copy, img_save_path):
        self.path = path
        self.model = model
        self.model_copy = model_copy
        self.img_save_path = img_save_path
        # Device to use
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        # Network
        self.net = unet.UNet().to(self.device)

        self.opt = torch.optim.Adam(self.net.parameters(), lr=0.00001)
        self.loss_func = nn.BCELoss()

        self.loader = DataLoader(dataset=dataset.Datasets(path),
                                 batch_size=4,
                                 shuffle=True,
                                 num_workers=4)

        if os.path.exists(self.model):
            self.net.load_state_dict(torch.load(model))
            print(f'Loaded {model}!')
        else:
            print('No Param!')
        os.makedirs(img_save_path, exist_ok=True)
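If the checkpoint may have been saved on a different device than the one training now runs on, the loading step can be made device-agnostic with torch.load's map_location argument. A small variant of the block above, using the same names as in the snippet:

        if os.path.exists(self.model):
            # map_location remaps GPU-saved tensors onto the current device
            state = torch.load(self.model, map_location=self.device)
            self.net.load_state_dict(state)
            print(f'Loaded {self.model}!')
        else:
            print('No Param!')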
Example #3
    try:
        # parse algorithm specification file, which is used by TemplateExecutor for madlib
        # generate "create table schema.algorithmname" statement
        spec = ExecutorSpec(Path.CfgSpecPath + Path.algorithmsSpecXml, configer.resultDBSchema)
        specName = Path.algorithmsSpecXml.split(".")[0]
        spec.writeCreateSQL(Path.BootstrapDir + specName + ".sql")

    except Exception as exp:
        print(exp)
        print("Error when parsing algorithm")
        sys.exit()

    try:
        # parse dataset
        datasets = dataset.Datasets(Path.CfgSpecPath + Path.datasetXml)
        datasets.getDataSets()
    except Exception as exp:
        print(exp)
        print("Error when parsing dataset")
        sys.exit()

    # generate test cases according to each spec xml file
    for specXml in glob.glob(Path.caseSpecPath + "*.xml"):
        # test case spec xml file name
        name = specXml.split("/")[-1].split(".")[0]

        # test case files' absolute paths
        scheduleFile    =   Path.TestCaseDir + "case_" + name
        caseSQLFile     =   Path.TestCaseDir + name + ".sql_out"
        suiteSqlFile    =   Path.TestCaseDir + name + "suite.sql"
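The filename extraction above can also be written with os.path helpers, which handle platform-specific path separators. A short equivalent sketch (behavior differs only for filenames containing more than one dot):

import os

# basename strips the directory part, splitext drops the .xml extension
name = os.path.splitext(os.path.basename(specXml))[0]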
Example #4
def run_training():
    '''Train the neural network.'''
    # sanity check
    assert (FLAGS.input_data_type == 'float' or FLAGS.input_data_type == 'int')
    assert (FLAGS.output_data_type == 'float'
            or FLAGS.output_data_type == 'int')
    # import the dataset
    data_sets = dataset.Datasets(FLAGS.data_dir, FLAGS.separate_file,
                                 FLAGS.input_data_type, FLAGS.output_data_type)
    # for hotspot training
    '''
    data_sets = dataset.Datasets(FLAGS.data_dir,
            FLAGS.separate_file,
            FLAGS.input_data_type, FLAGS.output_data_type,
            FLAGS.tile_size, FLAGS.num_maps)
    '''

    with tf.Graph().as_default():
        # placeholder
        input_pl, golden_pl = util.generate_placeholder(
            data_sets.num_in_neuron, data_sets.num_out_neuron,
            FLAGS.batch_size, FLAGS.input_data_type, FLAGS.output_data_type)
        # build graph
        if FLAGS.hidden1 == 0:
            assert (FLAGS.hidden2 == 0)
            outputs = util.layer('output_layer', input_pl,
                                 data_sets.num_in_neuron,
                                 data_sets.num_out_neuron, None)
        else:
            hidden1 = util.layer('hidden1', input_pl, data_sets.num_in_neuron,
                                 FLAGS.hidden1, util.fast_sigmoid)
            if FLAGS.hidden2 == 0:
                outputs = util.layer('output_layer', hidden1, FLAGS.hidden1,
                                     data_sets.num_out_neuron, None)
            else:
                hidden2 = util.layer('hidden2', hidden1, FLAGS.hidden1,
                                     FLAGS.hidden2, util.fast_sigmoid)
                outputs = util.layer('output_layer', hidden2, FLAGS.hidden2,
                                     data_sets.num_out_neuron, None)

        # loss
        #loss = bm.loss(outputs, golden_pl)
        loss = util.loss(outputs, golden_pl, FLAGS.benchmark)

        # train
        #train_op = bm.training(loss, FLAGS.learning_rate)
        train_op = util.training(loss, FLAGS.learning_rate)

        # accumulated error for one batch of data
        error = util.error(outputs, golden_pl, FLAGS.benchmark)

        # summary - not necessary
        summary = tf.merge_all_summaries()

        # init
        init = tf.initialize_all_variables()

        # sess
        sess = tf.Session()

        # summary writer - not necessary
        summary_writer = tf.train.SummaryWriter(FLAGS.log_dir, sess.graph)

        # everything built, run init
        sess.run(init)

        # start training
        #_, max_steps = data_sets.train.max_steps(FLAGS.batch_size)
        for step in xrange(FLAGS.max_steps):
            feed_dict = util.fill_feed_dict(data_sets.train, input_pl,
                                            golden_pl, FLAGS.batch_size)
            sess.run(train_op, feed_dict=feed_dict)

            # print the loss every 100 steps
            # write the summary
            # evaluate the model
            if step % 100 == 0:
                print('step %d: loss = %.2f' %
                      (step, sess.run(loss, feed_dict=feed_dict)))

                summary_str = sess.run(summary, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()
                '''
                print('training data evaluation')
                util.do_eval(sess, error,
                        input_pl, golden_pl,
                        FLAGS.batch_size, data_sets.train)
                '''
                print('validation data evaluation')
                util.do_eval(sess, error, input_pl, golden_pl,
                             FLAGS.batch_size, data_sets.validate)

        # final accuracy
        print('test data evaluation')
        util.do_eval(sess, error, input_pl, golden_pl, FLAGS.batch_size,
                     data_sets.test)

        # filename for saving
        savefile = str(data_sets.num_in_neuron) + "_" + str(
            FLAGS.hidden1) + "_" + str(FLAGS.hidden2) + "_" + str(
                data_sets.num_out_neuron) + ".txt"

        # save weights and biases
        util.save_config(sess, NUM_LAYERS, FLAGS.config_dir, savefile)

        # save trained output
        #util.save_output(sess, data_sets.train, outputs, FLAGS.data_dir)
        # need to fetch original input data
        output_save = sess.run(outputs,
                               feed_dict={input_pl: data_sets.input_data})
        np.savetxt(FLAGS.data_dir + "train_result/" + savefile,
                   output_save,
                   delimiter=" ")