Example #1
def train():
    x_data, y_data = read(os.getcwd() + "\\datasets\\1\\train_data.csv")

    x_train, x_validation, x_test, y_train, y_validation, y_test = get_data(
        x_data, y_data)
    path = input("Input output file name for neural network (enter to end): ")
    while path != "":
        path = path + ".net"
        net, res = create_nn(x_train, y_train,
                             os.getcwd() + "\\" + path, 6, 1000, 0.01)
        test(x_test, y_test, net)
        path = input(
            "Input output file name for neural network (enter to end): ")
Example #2
def test_model(
    use_cuda,
    dset_folder,
    disable_tqdm=False,
):
    best_model = GAT_MNIST(num_features=util.NUM_FEATURES,
                           num_classes=util.NUM_CLASSES)
    util.load_model("best", best_model)
    if use_cuda:
        best_model = best_model.cuda()

    test_dset = MNIST(dset_folder, train=False, download=True)
    test_imgs = test_dset.data.unsqueeze(-1).numpy().astype(np.float64)
    with multiprocessing.Pool() as p:
        test_graphs = np.array(p.map(util.get_graph_from_image, test_imgs))
    del test_imgs
    test_labels = test_dset.targets.numpy()

    test_accs = util.test(
        best_model,
        test_graphs,
        test_labels,
        list(range(len(test_labels))),
        use_cuda,
        desc="Test ",
        disable_tqdm=disable_tqdm,
    )
    test_acc = 100 * np.mean(test_accs)
    print("TEST RESULTS: {acc:.2f}%".format(acc=test_acc))
Example #3
def train():
    x_data, y_data = read()
    x_data = generate_data(x_data)
    x_train, x_validation, x_test, y_train, y_validation, y_test = get_data_ds3(
        x_data, y_data)
    x_train = x_train + x_validation
    y_train = y_train + y_validation

    path = input("Input output file name for neural network (enter to end): ")
    while path != "":
        path = path + ".net"
        net, res = create_nn(x_train, y_train,
                             os.getcwd() + "\\" + path, 7, 6000, 0.001)
        test(x_test, y_test, net)
        path = input(
            "Input output file name for neural network (enter to end): ")
Example #4
def solve(year: int, day: int) -> None:
    click.echo(f'Year {year}, Day {day}')
    module = import_module(f'{year}.{day:02d}')
    data = read_input(year, day)

    tc1 = read_tc(year, day, 1)
    if tc1:
        test(module.solve_1, tc1)
    part_1_time, part_1_solution = timed(module.solve_1)(data)
    click.echo(f'Solution 1: {part_1_solution}, Took: {part_1_time}ms')

    tc2 = read_tc(year, day, 2)
    if tc2:
        test(module.solve_2, tc2)
    part_2_time, part_2_solution = timed(module.solve_2)(data)
    click.echo(f'Solution 2: {part_2_solution}, Took: {part_2_time}ms')
Example #5
def testModExp(number1, number2, number3):
    result = hex(pow(number1, number2, number3) & util.bitMask)[2:]

    return util.test(
        result, "modexp {0} {1} {2}".format(util.toBigIntString(number1),
                                            util.toBigIntString(number2),
                                            util.toBigIntString(number3)))
Example #6
def test_asyncio():
    print('[py/asyncio]')

    oLooper = util.Looper()
    oFuture = oLooper.runTask(util.isValid())
    if oFuture.result():
        for _ in range(10):
            oLooper.addTask(util.test(2))
        oLooper.run()
Example #7
    def post(self):
        file1 = self.request.files['file1'][0]
        original_fname = file1['filename']
        extension = os.path.splitext(original_fname)[1]
        fname = ''.join(random.choice(string.ascii_lowercase + string.digits) for x in xrange(6))
        final_filename = fname+extension
        with open(final_filename, 'w') as out_file:
            out_file.write(file1['body'])

        font = "dejavusans-alphanumeric"
        fontimg = mpimg.imread('train/' + font + '.jpg')
        util.train(fontimg, font)

        testimg = mpimg.imread(final_filename)
        self.write(util.test(testimg, font))
Example #8
    def post(self):
        file1 = self.request.files['file1'][0]
        original_fname = file1['filename']
        extension = os.path.splitext(original_fname)[1]
        fname = ''.join(
            random.choice(string.ascii_lowercase + string.digits)
            for x in xrange(6))
        final_filename = fname + extension
        with open(final_filename, 'w') as out_file:
            out_file.write(file1['body'])

        font = "dejavusans-alphanumeric"
        fontimg = mpimg.imread('train/' + font + '.jpg')
        util.train(fontimg, font)

        testimg = mpimg.imread(final_filename)
        self.write(util.test(testimg, font))
Example #9
def testSubtract(number1, number2):
    result = number1 - number2

    if result < 0:
        # Represent the negative result as two's complement, padded to the
        # smallest multiple of 32 bits that fits it (at least 32 bits).
        power = max((len(hex(-result)) // 8 + 1) * 32, 32)
        result = (1 << power) + result
        result = hex(result)[2:]
        # Workaround for bizarre "f" and "0" prefixing: drop redundant 32-bit
        # blocks of sign-extension "f"s, then strip leading zeros.
        while result[:8] == "f" * 8:
            result = result[8:]
        while result[0] == '0':
            result = result[1:]
    else:
        result = hex(result)[2:]

    return util.test(
        result, "subtract {0} {1}".format(util.toBigIntString(number1),
                                          util.toBigIntString(number2)))
Example #10
def main(args, test_file, vocab_file, embeddings_file, pretrained_file, max_length=50, gpu_index=0, batch_size=128):

    device = torch.device("cuda:{}".format(gpu_index) if torch.cuda.is_available() else "cpu")
    print(20 * "=", " Preparing for testing ", 20 * "=")
    if platform == "linux" or platform == "linux2":
        checkpoint = torch.load(pretrained_file)
    else:
        checkpoint = torch.load(pretrained_file, map_location=device)
    # Retrieving model parameters from checkpoint.
    embeddings = load_embeddings(embeddings_file)
    print("\t* Loading test data...")    
    test_data = LCQMC_Dataset(test_file, vocab_file, max_length)
    test_loader = DataLoader(test_data, shuffle=True, batch_size=batch_size)
    print("\t* Building model...")
    model = RE2(args, embeddings, device=device).to(device)
    model.load_state_dict(checkpoint["model"])
    print(20 * "=", " Testing RE2 model on device: {} ".format(device), 20 * "=")
    batch_time, total_time, accuracy, auc = test(model, test_loader)
    print("\n-> Average batch processing time: {:.4f}s, total test time: {:.4f}s, accuracy: {:.4f}%, auc: {:.4f}\n".format(batch_time, total_time, (accuracy*100), auc))
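Example #11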
def model_load_test(args,
                    test_df,
                    vocab_file,
                    embeddings_file,
                    pretrained_file,
                    test_prediction_dir,
                    test_prediction_name,
                    mode,
                    max_length=50,
                    gpu_index=0,
                    batch_size=128):

    device = torch.device(
        "cuda:{}".format(gpu_index) if torch.cuda.is_available() else "cpu")
    print(20 * "=", " Preparing for testing ", 20 * "=")
    if platform == "linux" or platform == "linux2":
        checkpoint = torch.load(pretrained_file)
    else:
        checkpoint = torch.load(pretrained_file, map_location=device)
    # Retrieving model parameters from checkpoint.
    embeddings = load_embeddings(embeddings_file)
    print("\t* Loading test data...")
    test_data = My_Dataset(test_df, vocab_file, max_length, mode)
    test_loader = DataLoader(test_data, shuffle=False, batch_size=batch_size)
    print("\t* Building model...")
    model = RE2(args, embeddings, device=device).to(device)
    model.load_state_dict(checkpoint["model"])
    print(20 * "=", " Testing RE2 model on device: {} ".format(device),
          20 * "=")
    batch_time, total_time, accuracy, predictions = test(model, test_loader)
    print(
        "\n-> Average batch processing time: {:.4f}s, total test time: {:.4f}s, accuracy: {:.4f}%\n"
        .format(batch_time, total_time, (accuracy * 100)))
    test_prediction = pd.DataFrame({'prediction': predictions})
    if not os.path.exists(test_prediction_dir):
        os.makedirs(test_prediction_dir)
    test_prediction.to_csv(os.path.join(test_prediction_dir,
                                        test_prediction_name),
                           index=False)
Example #12
def test_model(
    use_cuda,
    dset_folder,
    disable_tqdm=False,
):
    best_model = GAT_MNIST(num_features=util.NUM_FEATURES,
                           num_classes=util.NUM_CLASSES)
    util.load_model("best", best_model)
    if use_cuda:
        best_model = best_model.cuda()

    test_graphs, test_labels = process_dataset(dset_folder, "test")

    test_accs = util.test(
        best_model,
        test_graphs,
        test_labels,
        list(range(len(test_labels))),
        use_cuda,
        desc="Test ",
        disable_tqdm=disable_tqdm,
    )
    test_acc = 100 * np.mean(test_accs)
    print("TEST RESULTS: {acc:.2f}%".format(acc=test_acc))
Example #13
                    int(contents[10].split(",")[9]) == 4 
                        and int(contents[12].split(",")[9]) == 5
                        and int(contents[22].split(",")[9]) == 1
                        and "ENEMY: Zed" in contents[26]
                        and "10" in contents[27]
                        and "2"  in contents[28]
                        and "4"  in contents[29]
                        and "3"  in contents[30],
                    "bad config file contents")
                    
    # Clean-up testing resources
    test_file.close()
    clean_map(file_name)
    
    # End Message
    util.sprint( "Engine testing Complete! Failed: [" + str(test.failed) + "/"
            + str(test.run) + "]" )
            
    # Return testing object
    return test
    
### SCRIPT ####

if __name__ == '__main__':
    util.test()
    util_test = test_util()
    clean_map(file_name)
    clean_map('__temp2__')
    maps_test = test_maps()
    clean_map(file_name)
    engine_test = test_engine()
Example #14
def main(args_parser):
    parser = args_parser
    args = parser.parse_args()

    with tf.device('/cpu:0'):
        #tf.reset_default_graph()
        DATASET_PATH = args.datasetPath
        LEARNING_RATE_1 = args.learningRate
        EPOCHS = args.epochs
        BATCH_SIZE = args.batchSize
        NUM_CLASSES = args.numClasses
        Z_SCORE = args.zScore
        WEIGHT_DECAY_1 = args.weightDecay

        #Placeholders
        learning_rate = tf.placeholder(tf.float32,
                                       shape=[],
                                       name='learning_rate')
        weight_decay = tf.placeholder(tf.float32,
                                      shape=[],
                                      name="weight_decay")

        #Dataset
        test_features, test_labels, test_filenames = util.test_input_fn(
            DATASET_PATH, BATCH_SIZE, EPOCHS)

        #Model
        _, _, test_cross_entropy, test_conf_matrix_op, test_accuracy = initiate_vgg_model(
            test_features,
            test_labels,
            test_filenames,
            NUM_CLASSES,
            weight_decay,
            learning_rate,
            handle="testing")
        saver = tf.train.Saver()

        with tf.Session() as sess:
            with np.printoptions(threshold=np.inf):
                if not os.path.isdir("./hcc_output/"):
                    raise Exception(
                        "Model file not found. Use Train.py to train a model")
                else:
                    saver.restore(sess, "./hcc_output/model.ckpt")
                    print("Model restored from Saver files")

                writer = tf.summary.FileWriter("./short_tensorboard_logs/")
                writer.add_graph(sess.graph)
                merged_summary = tf.summary.merge_all()
                sess.run([
                    tf.global_variables_initializer(),
                    tf.local_variables_initializer()
                ])

                for i in range(100):
                    print("Current Testing Iteration : {}/{}".format(i, 100))
                    summary, _, test_ce, test_acc = util.test(
                        BATCH_SIZE, learning_rate, weight_decay, sess,
                        test_cross_entropy, test_conf_matrix_op, NUM_CLASSES,
                        LEARNING_RATE_1, WEIGHT_DECAY_1, merged_summary,
                        test_accuracy)
                    test_value1, test_value2 = util.confidence_interval(
                        test_acc, Z_SCORE, 32)
                    print("Testing Accuracy : {}".format(test_acc))
                    print("Testing Loss (Cross Entropy) : {}".format(test_ce))
                    print("Testing Confidence Interval: [{} , {}]".format(
                        test_value2, test_value1))
                    writer.add_summary(summary, i)
Example #15
#!/usr/bin/env python

import os, json, socket, sys, util

path = os.path.split(os.path.realpath(__file__))[0]
os.chdir(path)
sys.path.insert(0, path)

os.environ['DEVEL'] = 'yes'
os.environ['PGHOST'] = os.path.join(path, 'postgres_data/socket')

if 'TMUX' in os.environ:  # see https://github.com/sagemathinc/smc/issues/563
    del os.environ['TMUX']

util.chdir()

ports = util.get_ports()
hostname = '0.0.0.0'

cmd = "service_hub.py --dev --foreground --hostname={hostname} --port={hub_port} --proxy_port=0 --gap=0 {test} start".format(
    hostname=hostname, hub_port=ports['hub-api'], test=util.test())

util.cmd(cmd)
Example #16
def testAnd(number1, number2):
    result = hex((number1 & number2) & util.bitMask)[2:]

    return util.test(
        result, "and {0} {1}".format(util.toBigIntString(number1),
                                     util.toBigIntString(number2)))
Example #17
def run(rank, size):

    # set random seed
    torch.manual_seed(args.randomSeed+rank)
    np.random.seed(args.randomSeed)

    # load data
    train_loader, test_loader = util.partition_dataset(rank, size, args)    
    num_batches = ceil(len(train_loader.dataset) / float(args.bs))

    # load base network topology
    subGraphs = util.select_graph(args.graphid)
    
    # define graph activation scheme
    if args.matcha:
        GP = MatchaProcessor(subGraphs, args.budget, rank, size, args.epoch*num_batches, True)
    else:
        GP = FixedProcessor(subGraphs, args.budget, rank, size, args.epoch*num_batches, True)

    # define communicator
    if args.compress:
        communicator = ChocoCommunicator(rank, size, GP, 0.9, args.consensus_lr)
    else:
        communicator = decenCommunicator(rank, size, GP)

    # select neural network model
    model = util.select_model(10, args)
    model = model.cuda()
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = optim.SGD(model.parameters(), 
                          lr=args.lr,
                          momentum=args.momentum, 
                          weight_decay=5e-4,
                          nesterov=args.nesterov)
    
    # guarantee all local models start from the same point
    # can be removed    
    sync_allreduce(model, rank, size)

    # init recorder
    comp_time, comm_time = 0, 0
    recorder = util.Recorder(args,rank)
    losses = util.AverageMeter()
    top1 = util.AverageMeter()
    tic = time.time()
    itr = 0
    
    # start training
    for epoch in range(args.epoch):
        model.train()

        # Start training each epoch
        for batch_idx, (data, target) in enumerate(train_loader):
            start_time = time.time()
            # data loading 
            data, target = data.cuda(non_blocking = True), target.cuda(non_blocking = True)                
            
            # forward pass
            output = model(data)
            loss = criterion(output, target)

            # record training loss and accuracy
            record_start = time.time()
            acc1 = util.comp_accuracy(output, target)
            losses.update(loss.item(), data.size(0))
            top1.update(acc1[0], data.size(0))
            record_end = time.time()

            # backward pass
            loss.backward()
            update_learning_rate(optimizer, epoch, itr=batch_idx, itr_per_epoch=len(train_loader))

            # gradient step
            optimizer.step()
            optimizer.zero_grad()
            end_time = time.time()

            d_comp_time = (end_time - start_time - (record_end - record_start))
            comp_time += d_comp_time

            # communication happens here
            d_comm_time = communicator.communicate(model)
            comm_time += d_comm_time

            print("batch_idx: %d, rank: %d, comp_time: %.3f, comm_time: %.3f,epoch time: %.3f " % (batch_idx+1,rank,d_comp_time, d_comm_time, comp_time+ comm_time), end='\r')

        toc = time.time()
        record_time = toc - tic # wall-clock time, includes everything
        epoch_time = comp_time + comm_time # only the compute and communication parts

        # evaluate test accuracy at the end of each epoch
        test_acc = util.test(model, test_loader)

        recorder.add_new(record_time,comp_time,comm_time,epoch_time,top1.avg,losses.avg,test_acc)
        print("rank: %d, epoch: %.3f, loss: %.3f, train_acc: %.3f, test_acc: %.3f epoch time: %.3f" % (rank, epoch, losses.avg, top1.avg, test_acc, epoch_time))
        if rank == 0:
            print("comp_time: %.3f, comm_time: %.3f, comp_time_budget: %.3f, comm_time_budget: %.3f" % (comp_time, comm_time, comp_time/epoch_time, comm_time/epoch_time))
       
        if epoch%10 == 0:
            recorder.save_to_file()

        # reset recorders
        comp_time, comm_time = 0, 0
        losses.reset()
        top1.reset()
        tic = time.time()

    recorder.save_to_file()
Example #18
# These are just test Python sources.
print("Hello!")

import sys
print(sys.path)

from time import gmtime, strftime
print('Current time is: ' + strftime("%Y-%m-%d %H:%M:%S", gmtime()))

from util import test
print(test(1))

import this

#from turtledemo import nim
#nim.main()
#nim.turtle.mainloop()

#print('loading turtledemo...')
#from turtledemo.wikipedia import main
#print('Finished loading turtledemo. Starting demo...')
#main()
#print('Finished demo.')

import twistedtest

print('Running twistedtest...')
twistedtest.run()
Example #19
                    "--cmd",
                    type=str,
                    default='train',
                    help="command")
 flags = parse.parse_args()
 settings = {}
 settings['data'] = flags.data
 settings['net'] = flags.net
 settings['loss'] = flags.loss
 settings['step'] = flags.step
 settings['dump'] = flags.dump + os.sep + flags.net
 settings['batch_size'] = flags.batch_size
 settings['test_batch_size'] = flags.test_batch_size
 settings['width'] = flags.img_width
 settings['height'] = flags.img_height
 settings['channel'] = flags.img_channel
 settings['pts_num'] = flags.points_num
 settings['grid_num'] = flags.grid_num
 settings['grid_dim'] = flags.grid_dim
 settings['epoch_num'] = flags.epoch_num
 if flags.gpu:
     os.environ['CUDA_VISIBLE_DEVICES'] = flags.gpu
 try:
     if flags.cmd == 'train':
         util.train(settings)
     elif flags.cmd == 'test':
         util.test(settings)
 except Exception as e:
     print(e)
 else:
     print('Done Running')
Example #20
File: main.py Project: cilsat/pyxel
    def initUI(self):

        """
        Main Window global parameters
        """
        self.imgOriginal = np.array([])
        self.mainWidth = 1280
        self.mainHeight = 640
        self.main = QLabel()
        self.imgNpBefore = np.array([])
        self.imgNpAfter = np.array([])
        self.skin = mpimg.imread('res/skin.jpg')

        grid = QGridLayout()
        self.main.setLayout(grid)

        self.mainBefore = QLabel('Before')
        self.mainBefore.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
        self.mainBefore.setWordWrap(True)
        self.mainBefore.setFont(QFont('Monospace', 10))

        self.mainAfter = QLabel('After')
        self.mainAfter.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
        self.mainAfter.setWordWrap(True)
        self.mainAfter.setFont(QFont('Monospace', 10))

        grid.addWidget(self.mainBefore, 0, 0)
        grid.addWidget(self.mainAfter, 0, 1)

        """
        Menu Bar
        """

        # FILE MENU
        openFile = QAction('Open', self)
        openFile.setShortcut('Ctrl+O')
        openFile.setStatusTip('Open new File')
        openFile.triggered.connect(self.showDialog)

        exitAction = QAction('Exit', self)
        exitAction.setShortcut('Ctrl+Q')
        exitAction.setStatusTip('Exit application')
        exitAction.triggered.connect(self.close)

        moveLeft = QAction('Move Left', self)
        moveLeft.setShortcut('Ctrl+L')
        moveLeft.triggered.connect(lambda: self.updateBefore(self.imgNpAfter))


        # PROCESS MENU
        equalizeMenu = QAction('Equalize', self)
        equalizeMenu.triggered.connect(lambda: self.updateImgAfter(util.equalize(self.imgNpBefore)))

        histogramMenu = QAction('Histogram', self)
        #histogramMenu.triggered.connect(lambda: self.updateImgAfter(

        grayscaleMenu = QAction('Grayscale', self)
        grayscaleMenu.triggered.connect(lambda: self.updateImgAfter(util.getgrayscale(self.imgNpBefore)))

        binarizeMenu = QAction('Binarize', self)
        binarizeMenu.triggered.connect(lambda: self.updateImgAfter(util.otsu(self.imgNpBefore)))

        gaussianMenu = QAction('Smooth', self)
        gaussianMenu.triggered.connect(lambda: self.updateImgAfter(util.convolvefft(util.gaussian_filt(), util.getgrayscale(self.imgNpBefore))))        

        resizeMenu = QAction('Resize', self)
        resizeMenu.triggered.connect(lambda: self.updateImgAfter(util.downsample(self.imgNpBefore)))

        segmentMenu = QAction('Segment', self)
        segmentMenu.triggered.connect(lambda: self.updateImgAfter(util.showobj(util.downsample(self.imgNpBefore, target_height=480), util.segment(util.thin(util.otsu(util.downsample(self.imgNpBefore, target_height=480), bg='light'))), box=False)))

        # EDGE DETECTION MENU
        averageMenu = QAction('Average', self)
        averageMenu.triggered.connect(lambda: self.updateImgAfter(util.degreezero(self.imgNpBefore, type="average")))

        differenceMenu = QAction('Difference', self)
        differenceMenu.triggered.connect(lambda: self.updateImgAfter(util.degreezero(self.imgNpBefore, type="difference")))

        homogenMenu = QAction('Homogen', self)
        homogenMenu.triggered.connect(lambda: self.updateImgAfter(util.degreezero(self.imgNpBefore, type="homogen")))

        sobelMenu = QAction('Sobel', self)
        sobelMenu.triggered.connect(lambda: self.updateImgAfter(util.degreeone(self.imgNpBefore, type="sobel")))

        prewittMenu = QAction('Prewitt', self)
        prewittMenu.triggered.connect(lambda: self.updateImgAfter(util.degreeone(self.imgNpBefore, type="prewitt")))

        freichenMenu = QAction('Frei-Chen', self)
        freichenMenu.triggered.connect(lambda: self.updateImgAfter(util.degreeone(self.imgNpBefore, type="freichen")))

        kirschMenu = QAction('Kirsch', self)
        kirschMenu.triggered.connect(lambda: self.updateImgAfter(util.degreetwo(self.imgNpBefore, type="kirsch")))


        # FEATURE MENU
        chaincodeMenu = QAction('Chain code', self)
        chaincodeMenu.triggered.connect(lambda: self.updateTxtAfter(str([util.getdirection(chain[n][0], chain[n][1]) for chain in util.segment(util.thin(self.imgNpBefore), cc=True) for n in xrange(len(chain))])))

        turncodeMenu = QAction('Turn code', self)
        turncodeMenu.triggered.connect(lambda: self.updateTxtAfter(str([util.getturncode(cc) for cc in util.segment(util.thin(self.imgNpBefore, bg='light'), cc=False)])))

        skeletonMenu = QAction('Zhang-Suen thinning', self)
        skeletonMenu.triggered.connect(lambda:self.updateImgAfter(util.zhangsuen(util.binarize(self.imgNpBefore, bg='light'))))

        skinMenu = QAction('Boundary detection', self)
        skinMenu.triggered.connect(lambda:self.updateImgAfter(util.thin(self.imgNpBefore, bg='light')))

        freemanMenu = QAction('Contour profile', self)


        # RECOGNITION MENU
        freemantrainfontMenu = QAction('Train Contour Font', self)
        freemantrainfontMenu.triggered.connect(lambda: util.train(self.imgNpBefore, feats='zs', order='font', setname='font')) 

        freemantrainplatMenu = QAction('Train ZS Plate (GNB)', self)
        freemantrainplatMenu.triggered.connect(lambda: util.train(self.imgNpBefore, feats='zs', order='plat', setname='plat'))

        cctctrainfontMenu = QAction('Train CC + TC Font', self)

        cctctrainplatMenu = QAction('Train CC + TC Plate', self)

        freemantestfontMenu = QAction('Predict Contour Font', self)
        freemantestfontMenu.triggered.connect(lambda: self.updateTxtAfter(util.test(self.imgNpBefore, feats='zs', order='font', setname='font')))
        
        freemantestplatMenu = QAction('Predict Contour Plate', self)
        freemantestplatMenu.triggered.connect(lambda:self.updateTxtAfter(util.test(self.imgNpBefore, feats='zs', order='plat', setname='plat')))

        cctctestfontMenu = QAction('Predict CC + TC Font', self)

        cctctestplatMenu = QAction('Predict CC + TC Plate', self)

        facesMenu = QAction('Show faces', self)
        facesMenu.triggered.connect(lambda: self.updateImgAfter(util.getFaces(self.imgNpBefore, self.skin, range=70)))

        faceMenu = QAction('Show facial features', self)
        faceMenu.triggered.connect(lambda: self.updateImgAfter(util.showobj(self.imgNpBefore, util.getFaceFeats(self.imgNpBefore, self.skin, range=100), color=False)))

        # MENU BAR
        menubar = self.menuBar()

        fileMenu = menubar.addMenu('&File')
        fileMenu.addAction(openFile)
        fileMenu.addAction(exitAction)
        fileMenu.addAction(moveLeft)

        processMenu = menubar.addMenu('&Preprocess')
        #processMenu.addAction(histogramMenu)
        processMenu.addAction(equalizeMenu)
        processMenu.addAction(grayscaleMenu)
        processMenu.addAction(binarizeMenu)
        processMenu.addAction(gaussianMenu)
        processMenu.addAction(resizeMenu)
        processMenu.addAction(segmentMenu)

        edgeMenu = menubar.addMenu('&Edge Detection')
        edgeMenu.addAction(averageMenu)
        edgeMenu.addAction(differenceMenu)
        edgeMenu.addAction(homogenMenu)
        edgeMenu.addAction(sobelMenu)
        edgeMenu.addAction(prewittMenu)
        edgeMenu.addAction(freichenMenu)
        edgeMenu.addAction(kirschMenu)

        featureMenu = menubar.addMenu('&Features')
        featureMenu.addAction(chaincodeMenu)
        featureMenu.addAction(turncodeMenu)
        featureMenu.addAction(skeletonMenu)
        featureMenu.addAction(skinMenu)
        featureMenu.addAction(freemanMenu)

        recogMenu = menubar.addMenu('&Recognition')
        recogMenu.addAction(freemantrainfontMenu)
        recogMenu.addAction(freemantrainplatMenu)
        recogMenu.addAction(cctctrainfontMenu)
        recogMenu.addAction(cctctrainplatMenu)
        recogMenu.addAction(freemantestfontMenu)
        recogMenu.addAction(freemantestplatMenu)
        recogMenu.addAction(cctctestfontMenu)
        recogMenu.addAction(cctctestplatMenu)
        recogMenu.addAction(facesMenu)
        recogMenu.addAction(faceMenu)
        #recogMenu.addAction(

        """
        Toolbar, Status Bar, Tooltip
        """
        self.statusBar().showMessage('Ready')

        QToolTip.setFont(QFont('SansSerif', 10))
        #self.setToolTip('This is a <b>QWidget</b> widget')

        """
        Displaying
        """

        self.setGeometry(12, 30, self.mainWidth, self.mainHeight+80)
        self.setWindowTitle('Pyxel')
        self.setWindowIcon(QIcon('res/web.png'))

        self.setCentralWidget(self.main)

        self.main.setGeometry(QRect(0, 80, self.mainWidth, self.mainHeight))
        #self.mainAfter.setGeometry(QRect(self.mainWidth/2, 80, self.mainWidth/2, self.mainHeight))

        self.show()
Example #21
#!/usr/bin/env python

import os, json, socket, sys, util

path = os.path.split(os.path.realpath(__file__))[0]
os.chdir(path)
sys.path.insert(0, path)

os.environ['DEVEL'] = 'yes'
os.environ['PGHOST'] = os.path.join(path, 'postgres_data/socket')

if 'TMUX' in os.environ:  # see https://github.com/sagemathinc/cocalc/issues/563
    del os.environ['TMUX']

util.chdir()

ports = util.get_ports()
base_url = util.base_url()


cmd = "cd ../../ && . smc-env &&  service_hub.py --dev --foreground --hostname=0.0.0.0 --port={hub_port} --share_port=0 --proxy_port=0 --gap=0 --mentions --base_url={base_url} {test} start".format(
    base_url=base_url, hub_port=ports['hub'], test=util.test())
print(cmd)
util.cmd(cmd)
Example #22
def cluster_view(request):
    context = {'Json_Filename':'test.json'}
    util.test()
    return render(request, 'cluster_test.html', context)
Example #23
from util import test

test()
Example #24
if __name__ == '__main__':
    parser = argparse.ArgumentParser(parents=[tools.argparser])
    parser.add_argument('-c', '--config_ft', default='client_data.json',
        dest='config_ft', help='JSON file containing FT client info.')
    parser.add_argument('-d', '--duration', default=20, dest='duration',
        type=int, help='Duration to keep solenoids open (in seconds) [20].')
    parser.add_argument('--history', default=15, dest='hsize', type=int,
        help='Number of entries to keep in the soil probe history. [15]')
    parser.add_argument('-i', '--interval', default=300, dest='interval',
        type=int, help='Time between sensor readings (in seconds). [300]')
    parser.add_argument('-n', '--noinit', default=True, dest='init',
        action='store_false', help='Don\'t initialize devices at startup.')
    parser.add_argument('-o', '--output', dest='output',
        help='Directory to use for logging output.  [stdout only.]')
    parser.add_argument('-t', '--test', default=False, dest='test',
        help='Run a test of the sensors and solenoids. Format: N:M, seconds for each solenoid.')
    # TODO: Calibrate sensors and set this to a reasonable default.
    parser.add_argument('-w', '--water-threshold', default=0,
        dest='water_threshold', type=int,
        help='AIN value below which we open the solenoid to water.')
    args = parser.parse_args()

    if args.init:
        print('Initializing...')
        util.init()

    if args.test:
        util.test(args.test)

    main_loop(args)
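Example #25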
use_cuda = not args.no_cuda and torch.cuda.is_available()

print(use_cuda)

##########################################################################################################################################################
##########################################################################################################################################################
##########################################################################################################################################################
##########################################################################################################################################################

# Define the model

model = torch.load("saves/model_after_retraining.ptmodel")
print(model)
print('accuracy before weight sharing')
util.test(model, use_cuda)
print("---------------------------------")

##########################################################################################################################################################
##########################################################################################################################################################
##########################################################################################################################################################
##########################################################################################################################################################

# Weight sharing

apply_weight_sharing(model)
print('accuracy after weight sharing')
accuracy = util.test(model, use_cuda)

# Save the new model
os.makedirs('saves', exist_ok=True)
Example #26
                .format(e + 1, val_losses[e], val_err[e], val_size,
                        cur_time - val_time))
            print('Global time has passed {:.0f}:{:.0f}:{:.0f}'.format(
                h, m, s))
            print('')

            # Make a checkpoint.
            util.save_epoch_result('new_train_result', e, train_losses[e],
                                   train_err[e], val_losses[e], val_err[e])
            if val_err[e] < best_val:
                best_val = val_err[e]
                saver.save(sess,
                           'new_ckpt/model',
                           global_step=e + 1,
                           write_meta_graph=True)

        # Make a final checkpoint.
        saver.save(sess, 'new_ckpt/model-final', write_meta_graph=True)
        print('Training time: {:.2f}'.format(time.time() - begin))
        util.plot_training_result('new_train_result', train_losses, train_err,
                                  val_losses, val_err)

    del (xs_train1, xs_val, xs_train)
    test_loss, test_err = util.test('new_ckpt/model-final.meta',
                                    'new_ckpt/model-final', xs_test, ys_test,
                                    val_batch_size, classes)
    f = open('new_train_result.txt', 'a')
    print('Test_loss = {:.3f},  Test_err = {:.2f}'.format(test_loss, test_err),
          file=f)
    f.close()
Example #27
async def test(ctx):
    await ctx.send(util.test(ctx))
Example #28
    results[rec] = {}
    for test in testMetricsList:
        results[rec][test] = -1


for rec in recommenderList:
    for test in testMetricsList:
        if not test.__name__ == 'auc':
            t = 10
        else:
            t = r
        if type(rec.__self__).__name__ == "userKnn" or type(
                rec.__self__).__name__ == "itemKnn":
            eva = matrixEvaluationDict
        else:
            eva = evaluationDict
        results[rec][test] = test(eva, rec, t)

s = "recommender "
for test in testMetricsList:
    s += " & " + test.__name__
s += " \\\\\n"

for rec in recommenderList:
    s += type(rec.__self__).__name__
    for test in testMetricsList:
        s += " & " + str(results[rec][test])[0:6]
    s += " \\\\\n"

print(s)
Example #29
def main():

    parser = get_parser()
    args = parser.parse_args()
    setup_seed(args.seed)
    device = 'cuda:' + str(args.device)
    train_loader, test_loader = build_dataset(args)
    train_accuracies = []
    test_accuracies = []
    class_num = 10 if args.dataset == 'cifar10' else 100

    t_net = {
        'resnet18': resnet18,
        'resnet34': resnet34,
        'resnet56': resnet56,
        'resnet110': resnet110
    }[args.t_model](class_num)
    t_ckpt_name = 'SGD-CIFAR' + str(class_num) + '-' + args.t_model
    if args.dataset == 'cifar10':
        path = '../ckpt/checkpoint/cifar10/' + t_ckpt_name
    else:
        path = '../ckpt/checkpoint/cifar100/' + t_ckpt_name
    ckpt = torch.load(path, map_location=device)
    t_net.load_state_dict(ckpt['net'])
    t_net = t_net.to(device)

    s_ckpt_name = 'SGD-CIFAR' + str(
        class_num) + '-' + args.s_model + '-student' + '-overhaul2'
    s_net = {
        'resnet18': resnet18,
        'resnet20': resnet20,
        'resnet34': resnet34,
        'resnet56': resnet56,
        'resnet110': resnet110
    }[args.s_model](class_num)
    s_net = s_net.to(device)
    optimizer = optim.SGD(s_net.parameters(),
                          args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    criterion = nn.CrossEntropyLoss()

    d_net = distillation.Distiller(t_net, s_net)

    start_epoch = 0

    best_acc = 0
    start = time.time()
    for epoch in range(start_epoch, 150):

        if epoch in [80, 120]:
            for param_group in optimizer.param_groups:
                param_group['lr'] *= 0.1

        train_acc = train_with_distill(d_net, optimizer, device, train_loader,
                                       criterion)
        test_acc = test(s_net, device, test_loader, criterion)
        end = time.time()
        print('epoch %d, train %.3f, test %.3f, time %.3fs' %
              (epoch, train_acc, test_acc, end - start))
        start = time.time()

        # Save checkpoint.
        if best_acc < test_acc:
            best_acc = test_acc
            if epoch > 80:
                state = {
                    'net': s_net.state_dict(),
                }
                if not os.path.isdir('../ckpt/checkpoint'):
                    os.mkdir('../ckpt/checkpoint')
                if args.dataset == 'cifar10':
                    if not os.path.isdir('../ckpt/checkpoint/cifar10'):
                        os.mkdir('../ckpt/checkpoint/cifar10')
                    torch.save(
                        state,
                        os.path.join('../ckpt/checkpoint/cifar10',
                                     s_ckpt_name))
                elif args.dataset == 'cifar100':
                    if not os.path.isdir('../ckpt/checkpoint/cifar100'):
                        os.mkdir('../ckpt/checkpoint/cifar100')
                    torch.save(
                        state,
                        os.path.join('../ckpt/checkpoint/cifar100',
                                     s_ckpt_name))

        print('best_acc %.3f' % best_acc)
        train_accuracies.append(train_acc)
        test_accuracies.append(test_acc)

        if not os.path.isdir('../ckpt/curve'):
            os.mkdir('../ckpt/curve')
        if args.dataset == 'cifar10':
            if not os.path.isdir('../ckpt/curve/cifar10'):
                os.mkdir('../ckpt/curve/cifar10')
            torch.save(
                {
                    'train_acc': train_accuracies,
                    'test_acc': test_accuracies
                }, os.path.join('../ckpt/curve/cifar10', s_ckpt_name))
        elif args.dataset == 'cifar100':
            if not os.path.isdir('../ckpt/curve/cifar100'):
                os.mkdir('../ckpt/curve/cifar100')
            torch.save(
                {
                    'train_acc': train_accuracies,
                    'test_acc': test_accuracies
                }, os.path.join('../ckpt/curve/cifar100', s_ckpt_name))
Example #30
def testDivide(number1, number2):
    result = hex((number1 // number2) & util.bitMask)[2:]

    return util.test(
        result, "divide {0} {1}".format(util.toBigIntString(number1),
                                        util.toBigIntString(number2)))
Example #31
def testXor(number1, number2):
    result = hex((number1 ^ number2) & util.bitMask)[2:]

    return util.test(
        result, "xor {0} {1}".format(util.toBigIntString(number1),
                                     util.toBigIntString(number2)))
Example #32
embedding_size = 10
cin_layer_nums = [40, 30, 20]
dnn_hidden = [200, 200]
device = torch.device('cuda:0')
#device = torch.device('cpu')

model = xDeepFM(dataset.feature_size, dataset.field_size, embedding_size,
                cin_layer_nums, dnn_hidden).to(device)

lr = 0.001
weight_l2 = 1e-4
epoch = 10

st = time.time()
criterion = torch.nn.BCELoss()
optimizer = torch.optim.Adam(params=model.parameters(),
                             lr=lr,
                             weight_decay=weight_l2)
for epoch_i in range(epoch):
    util.train(model,
               optimizer,
               train_data_loader,
               criterion,
               device,
               verbose=200)
    auc = util.test(model, valid_data_loader, device)
    print('epoch:', epoch_i, 'validation: auc:', auc)
    print('cost total time: %d' % (time.time() - st))
auc = util.test(model, test_data_loader, device)
print('test auc:', auc)
Example #33
            
            # count acc,loss on trainset
            _, predicted = torch.max(outputs.data, 1)
            
            total += labels.size(0)
            correct += (predicted == labels).sum().item()        
            train_loss += loss.item()
            counter += 1

            if i % 100 == 0:
                # get acc,loss on trainset
                acc = correct / total
                train_loss /= counter
                
                # test
                val_loss, val_acc = test(model, testloader, criterion)

                print('iteration %d , epoch %d:  loss: %.4f  val_loss: %.4f  acc: %.4f  val_acc: %.4f' 
                      %(i, epoch, train_loss, val_loss, acc, val_acc))
                
                # save logs and weights
                with open('log.csv', 'a') as f:
                    writer = csv.writer(f)
                    writer.writerow([i, train_loss, val_loss, acc, val_acc])
                if val_acc > max_val_acc:
                    torch.save(model.state_dict(), 'weights.pkl')
                    max_val_acc = val_acc
                    
                # reset counters
                correct, total = 0, 0
                train_loss, counter = 0, 0
Example #34
"""

import os, json, socket, sys, util

path = os.path.split(os.path.realpath(__file__))[0]; os.chdir(path); sys.path.insert(0, path)

os.environ['DEVEL']='yes'
os.environ['PGHOST']=os.path.join(path, 'postgres_data/socket')

util.chdir()

ports    = util.get_ports()
base_url = util.base_url(ports['hub-share-2'], write=False)

print('''\n\nBASE URL: {}\n\n'''.format(base_url))

if 'COCALC_PROJECT_PATH' in os.environ:
    share_path = os.environ['COCALC_PROJECT_PATH'] + '[project_id]'
else:
    share_path= os.path.join(os.environ['SMC_ROOT'], 'data/projects/[project_id]')

cmd = "unset NODE_ENV; cd ../../ && . smc-env &&  service_hub.py --share_path={share_path} --foreground --hostname=0.0.0.0 --port=0 --share_port={share_port} --proxy_port=0 --gap=0 --base_url={base_url} {test} start".format(
    base_url   = base_url,
    share_port = ports['hub-share-2'],
    share_path = share_path, test=util.test())

util.cmd(cmd)


Example #35
def testRShift(number, shiftBy):
    assert (shiftBy >= 0)
    result = hex(number >> shiftBy)[2:]
    nString = util.toBigIntString(number)

    return util.test(result, "rshift {0} {1}".format(nString, shiftBy))
Example #36
from sys import argv
from util import setup, init, select, test


def show_help():
    print("""Usage:
trainer.py [cmd]
cmd: setup or test""")


if __name__ == '__main__':
    init()

    if len(argv) >= 2:
        cmd = argv[1]

        if cmd == 'setup':
            setup()
        elif cmd == 'select':
            select(int(argv[2]))
        elif cmd == 'test':
            test()
        # elif cmd == 'add':
        #     repo = argv[2]
        else:
            show_help()
    else:
        show_help()
Example #37
def testLShift(number, shiftBy):
    assert (shiftBy >= 0)
    result = hex((number << shiftBy) & util.bitMask)[2:]
    nString = util.toBigIntString(number)

    return util.test(result, "lshift {0} {1}".format(nString, shiftBy))
Example #38
def run_tests():
  util.test()
  sexp.test()
Example #39
def testComplement(number):
    result = hex((~number) & util.bitMask)[2:]

    return util.test(result, "")
Example #40
def main(cli_args):
    parser = argparse.ArgumentParser(
        description="CSCE 496 HW 2, Classify Cifar data")
    parser.add_argument('--input_dir',
                        type=str,
                        default='/work/cse496dl/shared/homework/02',
                        help='Numpy datafile input')
    parser.add_argument(
        '--model_dir',
        type=str,
        default='./homework_2/',
        help='directory where model graph and weights are saved')
    parser.add_argument('--epoch',
                        type=int,
                        default=100,
                        help="Epoch : number of iterations for the model")
    parser.add_argument('--batch_size',
                        type=int,
                        default=32,
                        help="Batch Size")
    parser.add_argument('--model',
                        type=int,
                        help=" '1' for basic model, '2' for best model")
    parser.add_argument(
        '--stopCount',
        type=int,
        default=100,
        help="Number of times for dropping accuracy before early stopping")
    args_input = parser.parse_args(cli_args)

    if args_input.input_dir:
        input_dir = args_input.input_dir
    else:
        raise ValueError("Provide a valid input data path")

    if args_input.model_dir:
        model_dir = args_input.model_dir
    else:
        raise ValueError("Provide a valid model data path")

    if args_input.epoch:
        epochs = args_input.epoch
    else:
        raise ValueError("Epoch value cannot be null and has to be an integer")

    if args_input.batch_size:
        batch_size = args_input.batch_size
    else:
        raise ValueError(
            "Batch Size value cannot be null and has to be an integer")

    if args_input.model:
        model = args_input.model
    else:
        raise ValueError("Model selection must not be empty")

    if args_input.stopCount:
        stop_counter = args_input.stopCount
    else:
        raise ValueError("StopCount have to be an int")

    input_dir = '/work/cse496dl/shared/homework/02'
    #Make output model dir
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)

    #Load Data
    x = tf.placeholder(tf.float32, [None, 32, 32, 3], name='input_placeholder')
    y = tf.placeholder(tf.float32, [None, 100], name='labels')

    #Specify Model
    if (str(model) == '1'):
        train_images, train_labels, test_images, test_labels, val_images, val_labels = util.load_data(
            "")
        _, outputLayer = initiate_basic_model(x)
        #Run Training with early stopping and save output
        counter = stop_counter
        prev_winner = 0
        curr_winner = 0
        optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)
        cross_entropy = util.cross_entropy_op(y, outputLayer)
        global_step_tensor = util.global_step_tensor('global_step_tensor')
        train_op = util.train_op_basic(cross_entropy, global_step_tensor,
                                       optimizer)
        conf_matrix = util.confusion_matrix_op(y, outputLayer, 100)
        saver = tf.train.Saver()
        with tf.Session() as session:
            session.run(tf.global_variables_initializer())
            counter = stop_counter
            for epoch in range(epochs):
                if counter > 0:
                    print("Epoch : " + str(epoch))
                    util.training(batch_size, x, y, train_images, train_labels,
                                  session, train_op, conf_matrix, 100)
                    accuracy = util.validation(batch_size, x, y, val_images,
                                               val_labels, session,
                                               cross_entropy, conf_matrix, 100)
                    if epoch == 0:
                        prev_winner = accuracy
                        print("Saving.......")
                        saver.save(session,
                                   os.path.join("./homework_2/", "homework_2"))
                    else:
                        curr_winner = accuracy
                        if (curr_winner > prev_winner) and (counter > 0):
                            prev_winner = curr_winner
                            print("Saving.......")
                            saver.save(
                                session,
                                os.path.join("./homework_2/", "homework_2"))
                        else:
                            counter -= 1

                    test_accuracy = util.test(batch_size, x, y, test_images,
                                              test_labels, session,
                                              cross_entropy, conf_matrix, 100)
                    #Calculate the confidence interval
                    value1, value2 = util.confidence_interval(
                        test_accuracy, 1.96, test_images.shape[0])
                    print("Confidence Interval : " + str(value1) + " , " +
                          str(value2))
                else:
                    break

    elif (str(model) == '2'):
        sparsity_weight = 5e-3
        #Load the data and reshape it
        train_data = np.load(os.path.join(input_dir, 'imagenet_images.npy'))
        train_images, train_labels, test_images, test_labels, val_images, val_labels = util.load_data(
            "")
        #train_data = np.reshape(train_data, [-1,32,32,1])
        #Add noise to the data
        noise_level = 0.2
        x_noise = x + noise_level * tf.random_normal(tf.shape(x))
        code, outputs = initiate_autoencoder(x_noise, 100)
        #Optimizer for Autoencoder
        sparsity_loss = tf.norm(code, ord=1, axis=1)
        reconstruction_loss = tf.reduce_mean(tf.square(outputs -
                                                       x))  # Mean Square Error
        total_loss = reconstruction_loss + sparsity_weight * sparsity_loss
        optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)
        train_op = optimizer.minimize(total_loss)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            util.autoencoder_training(x, code, epochs, batch_size, train_data,
                                      sess, train_op)
            saver.save(sess, os.path.join("./homework_2/", "homework_2"))
        print("Done : " + str(code))

        _, outputLayer = initiate_dense_model(code)

        #Run Training with early stopping and save output
        counter = stop_counter
        prev_winner = 0
        curr_winner = 0
        optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)
        cross_entropy = util.cross_entropy_op(y, outputLayer)
        global_step_tensor = util.global_step_tensor('global_step_tensor')
        #train_op = util.train_op_encoder(cross_entropy, global_step_tensor, optimizer, var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, "code_layer"))
        train_op = util.train_op_basic(cross_entropy, global_step_tensor,
                                       optimizer)
        conf_matrix = util.confusion_matrix_op(y, outputLayer, 100)
        with tf.Session() as session:
            session.run(tf.global_variables_initializer())
            if os.path.isfile(os.path.join("./homework_2/", "homework_2")):
                saver = tf.train.import_meta_graph("homework_2.meta")
                saver.restore(session, "./homework_2/homework_2")
            code_encode = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                            "code_layer")
            session.run(
                tf.variables_initializer(code_encode,
                                         name="init_encoded_layer"))
            tf.stop_gradient(
                tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                  "init_encoded_layer"))
            counter = stop_counter
            for epoch in range(epochs):
                if counter > 0:
                    print("Epoch : " + str(epoch))
                    util.training(batch_size, x, y, train_images, train_labels,
                                  session, train_op, conf_matrix, 100)
                    accuracy = util.validation(batch_size, x, y, val_images,
                                               val_labels, session,
                                               cross_entropy, conf_matrix, 100)
                    if epoch == 0:
                        prev_winner = accuracy
                        print("Saving.......")
                        saver.save(session,
                                   os.path.join("./homework_2/", "homework_2"))
                    else:
                        curr_winner = accuracy
                        if (curr_winner > prev_winner) and (counter > 0):
                            prev_winner = curr_winner
                            print("Saving.......")
                            saver.save(
                                session,
                                os.path.join("./homework_2/", "homework_2"))
                        else:
                            print("Validation Loss : " +
                                  str(curr_winner - prev_winner))
                            counter -= 1

                    test_accuracy = util.test(batch_size, x, y, test_images,
                                              test_labels, session,
                                              cross_entropy, conf_matrix, 100)
                    #Calculate the confidence interval
                    value1, value2 = util.confidence_interval(
                        test_accuracy, 1.96, test_images.shape[0])
                    print("Confidence Interval : " + str(value1) + " , " +
                          str(value2))
                else:
                    break
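Example #41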
'''
    Loading and running modules: Python executes a module's code when it imports
    the module. Importing a module that contains nothing but function definitions,
    like the ones so far, therefore has no visible effect, but if the module
    contains code that performs some action, that code runs at import time. For
    example, take a module util.py that defines the following function: merely
    importing it executes line 4 and prints "test".

    def test():
        print("test")

    test()
'''

import util

util.test()

'''
    For this reason, a Python file that is meant to be imported as a module should not
    contain code that runs at import time. Even when some initialization is needed at
    load time, it is better manners to expose it as a function, because the importing
    side does not expect that a bare import will trigger any processing on its own.
    However, when a module may either be the entry point of a program or be imported by
    another one, you sometimes need an implementation that performs certain processing
    when it is the entry point but skips it when it is loaded as a module.

    To achieve this you use the special variable __name__. It belongs to an advanced
    topic called special attributes (covered in detail in the final part of this
    series), but without worrying about the details, just think of it as holding the
    module name. For example, util.py imported as a module gives util, and
    testmodule.py gives testmodule. There is one exception, though: the program that
    serves as the entry point gets the module name __main__ instead of its file name.

    Let's actually try it. Run the following three files (a sketch follows this block):

    hello.py (the entry point)
    nice.py (an imported file)
    world.py (an imported file)
'''
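The three files themselves are not reproduced above. Here is a minimal sketch of what they presumably contain, assuming each file simply prints its own __name__; only the file names hello.py, nice.py, and world.py come from the text, and the print statements are illustrative:

# nice.py -- an imported file; this top-level code runs once at import time
print('nice.py sees __name__ =', __name__)   # -> nice.py sees __name__ = nice

# world.py -- an imported file
print('world.py sees __name__ =', __name__)  # -> world.py sees __name__ = world

# hello.py -- the entry point; run as: python hello.py
# Importing nice and world executes their top-level print calls first,
# then this file reports its own __name__, which is __main__.
import nice
import world
print('hello.py sees __name__ =', __name__)  # -> hello.py sees __name__ = __main__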
Example #42
bin_search_cases = {
    'odds not found -1': ([0, 1, 2, 3, 4], -1),
    'odds found 0': ([0, 1, 2, 3, 4], 0),
    'odds found 1': ([0, 1, 2, 3, 4], 1),
    'odds found 2': ([0, 1, 2, 3, 4], 2),
    'odds found 3': ([0, 1, 2, 3, 4], 3),
    'odds found 4': ([0, 1, 2, 3, 4], 4),
    'evens not found -1': ([0, 1, 2, 3], -1),
    'evens found 0': ([0, 1, 2, 3], 0),
    'evens found 1': ([0, 1, 2, 3], 1),
    'evens found 2': ([0, 1, 2, 3], 2),
    'evens found 3': ([0, 1, 2, 3], 3),
    'evens not found 4': ([0, 1, 2, 3], 4),
}

util.test('BINARY SEARCH', algo.bin_search, bin_search_cases)

n7 = util.create_node(7)
n6 = util.create_node(6)
n3 = util.create_node(3, [n6, n7])

n5 = util.create_node(5)
n4 = util.create_node(4)
n2 = util.create_node(2, [n4, n5])

n1 = util.create_node(1, [n2, n3])

dfs_bfs_cases = {
    'map-dfs': (n1, lambda x, r: r + [x.get('v')], []),
    'filter-dfs': (n1, lambda x, r: r + [x.get('v')] if x.get('v') > 1 else r, [])
}