コード例 #1
0
 def show_cards(self, players: list):
     """Render each player's hand as an image and send it to that player."""
     generator = ImageGenerator(1)
     for current_player in players:
         # Produce the hand image, then wrap the generated file path.
         generator.hand_to_image(current_player)
         hand_image = File(generator.get_output('hand').strip())
         current_player.send_message("Hier zijn uwer kaarten")
         current_player.send_message(hand_image, is_file=True)
コード例 #2
0
ファイル: functions.py プロジェクト: MCGallaspy/end-of-art
def setNewBatch():
	"""Breed a new batch of images from the current batch and swap which
	batch is flagged as current.

	Bug fix: the result of ``ImageGenerator.breed`` was never assigned, so
	the first loop referenced an undefined name ``newBatch`` (NameError).
	"""
	curBatch = currentBatch()
	numNewImgs = len(curBatch)
	# breed() is assumed to return the newly created images -- TODO confirm.
	newBatch = ImageGenerator.breed(curBatch, numNewImgs)
	# Mark the offspring as the current batch...
	for img in newBatch:
		img.isCurrent = True
		img.save()
	# ...and retire the previous one.
	for img in curBatch:
		img.isCurrent = False
		img.save()
コード例 #3
0
    def generate_test_dataset(self):
        """Build the fixed set of test images.

        Used once to generate the test images that serve as the images from
        the default location, via the ImageGenerator() helper class.

        :param self:
        :return: list of metadata entries, one per generated shape image
        """
        ig = ImageGenerator()

        # (drawing method, its arguments, label) -- one entry per test image.
        specs = [
            (ig.draw_circle, (512, 100, ColorRGB.BLACK, 1), 'Kolo'),
            (ig.draw_rectangle, (300, 200, 20, ColorRGB.BLACK, 4), 'Prostokat'),
            (ig.draw_square, (200, 20, ColorRGB.GREEN, 3), 'Kwadrat'),
            (ig.draw_ellipse, (256, (130, 30), 0, ColorRGB.BLUE, 3), 'Elipsa'),
            (ig.draw_square, (512, 20, ColorRGB.GREEN, 3), 'Kwadrat'),
            (ig.draw_right_triangle, (550, 20, ColorRGB.BLACK, 1), 'Trojkat'),
            (ig.draw_square, (256, 20, ColorRGB.GREEN, 3), 'Kwadrat'),
            (ig.draw_circle, (256, 120, ColorRGB.BLACK, 1), 'Kolo'),
            (ig.draw_right_triangle, (200, 20, ColorRGB.BLACK, 1), 'Trojkat'),
            (ig.draw_rectangle, (220, 210, 20, ColorRGB.BLACK, 4), 'Prostokat'),
            (ig.draw_rectangle, (250, 300, 20, ColorRGB.BLACK, 4), 'Prostokat'),
            (ig.draw_ellipse, (256, (60, 120), 0, ColorRGB.BLUE, 3), 'Elipsa'),
            (ig.draw_circle, (256, 70, ColorRGB.RED, 2), 'Kolo'),
            (ig.draw_ellipse, (256, (30, 110), 20, ColorRGB.BLUE, 3), 'Elipsa'),
            (ig.draw_right_triangle, (300, 20, ColorRGB.GREEN, 1), 'Trojkat'),
        ]
        return [
            self.get_meta_data_from_image(draw(*args), label)
            for draw, args, label in specs
        ]
コード例 #4
0
    def generate_training_dataset(self):
        """Generate training images on the fly from parameters passed to the
        OpenCV drawing functions, via the ImageGenerator() helper class.

        :return: list of metadata entries, one per generated shape image
        """
        ig = ImageGenerator()

        # (drawing method, its arguments, label) -- one entry per training image.
        specs = [
            (ig.draw_square, (456, 20, ColorRGB.GREEN, 3), 'Kwadrat'),
            (ig.draw_square, (276, 20, ColorRGB.GREEN, 3), 'Kwadrat'),
            (ig.draw_square, (226, 20, ColorRGB.GREEN, 3), 'Kwadrat'),
            (ig.draw_square, (356, 20, ColorRGB.GREEN, 3), 'Kwadrat'),
            (ig.draw_rectangle, (256, 210, 20, ColorRGB.BLACK, 4), 'Prostokat'),
            (ig.draw_rectangle, (205, 220, 20, ColorRGB.BLACK, 4), 'Prostokat'),
            (ig.draw_rectangle, (350, 300, 20, ColorRGB.BLACK, 4), 'Prostokat'),
            (ig.draw_circle, (256, 60, ColorRGB.RED, 2), 'Kolo'),
            (ig.draw_circle, (256, 110, ColorRGB.BLACK, 1), 'Kolo'),
            (ig.draw_circle, (256, 110, ColorRGB.BLACK, 1), 'Kolo'),
            (ig.draw_ellipse, (256, (60, 100), 20, ColorRGB.BLUE, 3), 'Elipsa'),
            (ig.draw_ellipse, (256, (50, 100), 0, ColorRGB.BLUE, 3), 'Elipsa'),
            (ig.draw_ellipse, (256, (120, 30), 0, ColorRGB.BLUE, 3), 'Elipsa'),
            (ig.draw_ellipse, (256, (120, 30), 0, ColorRGB.BLUE, 3), 'Elipsa'),
            (ig.draw_ellipse, (256, (100, 30), 0, ColorRGB.BLUE, 3), 'Elipsa'),
            (ig.draw_ellipse, (256, (110, 30), 0, ColorRGB.BLUE, 3), 'Elipsa'),
            (ig.draw_right_triangle, (256, 20, ColorRGB.GREEN, 1), 'Trojkat'),
            (ig.draw_right_triangle, (512, 20, ColorRGB.BLACK, 1), 'Trojkat'),
            (ig.draw_right_triangle, (200, 20, ColorRGB.BLACK, 1), 'Trojkat'),
        ]
        return [
            self.get_meta_data_from_image(draw(*args), label)
            for draw, args, label in specs
        ]
コード例 #5
0
ファイル: anywizard.py プロジェクト: goshdarnheck/anywizard
# Get start time so we can log elapsed time
start_time = time.time()

# Open and parse config file
# NOTE(review): ``dir`` shadows the ``dir`` builtin; consider renaming
# (left as-is here because code below this chunk may also reference it).
dir = os.path.dirname(os.path.abspath(__file__))
Config = configparser.ConfigParser()
Config.read(os.path.join(dir, 'config.ini'))

# Read behaviour switches from the [settings] section.
tweet = Config.getboolean('settings', 'tweet')
templateSetting = Config.get('settings', 'template')

print("Tweeting: " + str(tweet))
print("Template Setting: " + str(templateSetting))

# Generate Image
imageGenerator = ImageGenerator(dir, templateSetting)
imageGenerator.createImage('output.png')

# Get Text to Tweet
textGen = TextGenerator()
tweetText = textGen.getRandomTweetText()
print("\nTweet: " + tweetText + "\n")

# Tweet!
if tweet:
    # Authenticate with the Twitter API using credentials from config.ini.
    auth = tweepy.OAuthHandler(
        Config.get('TwitterApiCreds', 'consumer_key'),
        Config.get('TwitterApiCreds', 'consumer_secret'))
    auth.set_access_token(Config.get('TwitterApiCreds', 'access_token'),
                          Config.get('TwitterApiCreds', 'access_token_secret'))
    # Authenticated client; presumably used further down to post the tweet.
    tweepyApi = tweepy.API(auth)
コード例 #6
0
ファイル: App.py プロジェクト: Alexia1994/graduation
    def _start(self):
        """Create the initial image population and set up the UI around it."""
        self.image_generator.initial_population()
        self._show_info()
        self._show_images()
        self._enable_canvas_btns()
        self._next_generation_button()



    def _show_next_generation(self):
        """Evolve the next generation from the current selection and redraw."""
        self.image_generator.next_generation(self.selected_character)
        # Clear every selection flag that is set (toggle the chosen ones 1 -> 0).
        for idx, flag in enumerate(self.selected_character):
            if flag == 1:
                self._select_character(idx)
        self._show_images()
        self._show_info()



if __name__ == "__main__":
    # Interactive evolutionary art: 6 candidates per generation, 10% mutation.
    image_generator = ImageGenerator(population_size = 6, mutation_prob = 0.1)
    app = App("IEC Art Design", image_generator)
    # Build the UI (menu bar plus left/right frames), then show initial info.
    app._set_top_menu_bar()
    app._set_left_frame()
    app._set_right_frame()
    app._show_info()

    app.mainloop()
コード例 #7
0
ファイル: simulated_data.py プロジェクト: kkarbasi/cobalt
import sys
import csv
sys.path.append('./util/')
from ImageGenerator import ImageGenerator
from helper import write_csv


# Feel free to change the dimensions.
# Changing the dimensions will change how many cells are in each image
img_gen = ImageGenerator(1000,1000,100)

# Most basic, easiest image with 45 solid well separated cells
# Positional args presumably describe cell size/spacing parameters --
# confirm against the ImageGenerator.make_ellipsoidal_image signature.
_, centers_1 = img_gen.make_ellipsoidal_image(
    25,
    25,
    5,
    200,
    200,
    10,
    fname = "solid_45_cells"
)

# Smaller cells with gaussian blur
_, centers_2 = img_gen.make_ellipsoidal_image(
    15,
    15,
    5,
    100,
    100,
    20,
    blur = True,
コード例 #8
0
import time
import numpy
from NeuralNetwork import NeuralNetwork
from ImageGenerator import ImageGenerator
from ActivationFunctions.Sigmoid import Sigmoid
from random import shuffle
# Location of the generated digit images and their .ans answer files.
folder = "D:/ProgrammingBigFiles/Generated Numbers/"
size = 10000
episodes = 20
GENERATE = False  # set True to (re)generate the image dataset on disk
TRAIN = True
# Map each digit read from a .ans file to its output-vector index.
myDigits = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}
# myDigits = {5: 0, 6: 1, 7: 2}
# myDigits = {6: 0, 7: 1}

imageGenerator = ImageGenerator("Arial")
startTime = time.time()
if GENERATE:
    imageGenerator.generateImages(size)
print("Generated Numbers: %.5lfs" % (time.time() - startTime))

# Build [input matrix, one-hot answer] pairs for every sample.
database = []
for i in range(size):
    # Bug fix: the file was opened without a context manager, so the handle
    # leaked whenever readline()/int() raised. ``with`` closes it always.
    with open(folder + "%d.ans" % i, "r") as f:
        answer = numpy.zeros((len(myDigits), 1))
        answer[myDigits[int(f.readline())]] = 1
        database += [[imageGenerator.imageToMatrix(i) / 255, answer]]
shuffle(database)
# 75/25 train/test split after shuffling.
split_at = int(len(database) * 0.75)
train, test = database[:split_at], database[split_at:]
コード例 #9
0
ファイル: main_template.py プロジェクト: rerejii/pwd2binary
    path_cls=path_cls,
    title='[TRAIN]_' + os.path.basename(OUT_ROOT_FOLDER),
    label=DATASET,
    item='train_accuracy',
)

# ========== save model ==========
modelpath = path_cls.make_model_path(MAINNAME + '-' + DATASET + '.h5')
nfunc.save_best_generator_model(net_cls=net_cls,
                                path_cls=path_cls,
                                path=modelpath)

# ========== measure generation speed ==========
# First pass: time image generation with the default device (GPU).
gen_cls = ImageGenerator(Generator_model=modelpath,
                         model_h=IMAGE_HEIGHT,
                         model_w=IMAGE_WIDTH,
                         fin_activate=FIN_ACTIVATE,
                         padding=net_cls.get_padding())
gen_cls.run(img_path=GENERATOR_TEST_PATH,
            out_path=GENERATOR_OUTPATH,
            time_out_path=path_cls.make_csv_path('Generator_time.csv'))
# Second pass: rebuild the generator forced onto the CPU for comparison.
gen_cls = ImageGenerator(Generator_model=modelpath,
                         model_h=IMAGE_HEIGHT,
                         model_w=IMAGE_WIDTH,
                         fin_activate=FIN_ACTIVATE,
                         padding=net_cls.get_padding(),
                         use_gpu=False)
gen_cls.run(
    img_path=GENERATOR_TEST_PATH,
    out_path=GENERATOR_OUTPATH,
    time_out_path=path_cls.make_csv_path('Generator_time_cpu.csv'),
コード例 #10
0
def do_run(i,
           x_train=None,
           y_train=None,
           res_dict=None,
           datagen_settings=None):
    """Run one randomized hyperparameter-search trial of a 3D CNN.

    Samples an architecture variant and hyperparameters at random, trains
    the model on an internal train/test split of ``x_train``/``y_train``,
    and stores the trial's best validation accuracy together with the
    sampled settings in ``res_dict[i]``.

    :param i: trial index; also the key used to store results in res_dict.
    :param x_train: training inputs (split into train/test internally).
    :param y_train: training labels.
    :param res_dict: shared dict collecting one result tuple per trial.
    :param datagen_settings: kwargs for ImageGenerator-based augmentation,
        or None to train without augmentation.

    NOTE(review): relies on names defined elsewhere in this file
    (``rnd``, ``np``, ``getUTC``, ``split_train_test``, ``num_classes``,
    ``epochs``, ``batch_size``, ``ImageGenerator``).
    NOTE(review): the modelIndex 4 and 5 branches below are identical --
    possibly unintentional duplication; confirm with the author.
    """
    import keras
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation, Flatten
    from keras.layers import Conv3D, MaxPooling3D
    from keras.callbacks import EarlyStopping
    from keras.callbacks import ReduceLROnPlateau

    UTC_local = getUTC()  # Randomly generate hyperparameters

    x_train, y_train, x_test, y_test = split_train_test(x_train, y_train)

    # Discrete search spaces, keyed by a random index drawn below.
    modelIndex_dict = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5}

    filters_dict = {0: 8, 1: 16, 2: 24}

    filterSize_dict = {0: 4, 1: 8}

    poolSize_dict = {0: 4, 1: 2, 2: 3}

    denseSize_dict = {0: 256, 1: 512, 2: 1024}

    dropout_dict = {0: 0, 1: 0.1, 2: 0.2, 3: 0.3, 4: 0.4}

    lr_dict = {0: 0.001, 1: 0.0001, 2: 0.00001}

    decay_dict = {0: 1e-04, 1: 1e-05, 2: 1e-06, 3: 1e-07}

    # Sample one value uniformly from each search space.
    modelIndex = modelIndex_dict.get(rnd.randint(0, len(modelIndex_dict) - 1))
    filters = filters_dict.get(rnd.randint(0, len(filters_dict) - 1))
    filter_size = filterSize_dict.get(rnd.randint(0, len(filterSize_dict) - 1))
    pool_size = poolSize_dict.get(rnd.randint(0, len(poolSize_dict) - 1))
    dense_size = denseSize_dict.get(rnd.randint(0, len(denseSize_dict) - 1))
    dropout = dropout_dict.get(rnd.randint(0, len(dropout_dict) - 1))
    lr = lr_dict.get(rnd.randint(0, len(lr_dict) - 1))
    decay = decay_dict.get(rnd.randint(0, len(decay_dict) - 1))

    # Dump the call's arguments (minus the bulky train arrays) for debugging.
    print('######### DEBUG - MAKE_MODEL - params')
    frame = inspect.currentframe()
    args, _, _, values = inspect.getargvalues(frame)
    print('function name "%s"' % inspect.getframeinfo(frame)[2])
    for g in args:
        if 'train' in g:
            continue
        print("    %s = %s" % (g, values[g]))

    print('modelIndex -' + str(modelIndex))
    print('filters - ' + str(filters))
    print('filter_size - ' + str(filter_size))
    print('pool_size - ' + str(pool_size))
    print('dense_size - ' + str(dense_size))
    print('dropout - ' + str(dropout))
    print('lr - ' + str(lr))
    print('decay - ' + str(decay))

    print('#########')
    model = Sequential()
    input_shape = x_train.shape[1:]

    if modelIndex == 1:

        # 1x Conv+relu+MaxPool+Dropout -> 1x Dense+relu+Dropout
        model.add(
            Conv3D(filters, (filter_size, filter_size, filter_size),
                   padding='same',
                   input_shape=input_shape))
        model.add(Activation('relu'))

        model.add(MaxPooling3D(pool_size=(pool_size, pool_size, pool_size)))
        model.add(Dropout(dropout))

        model.add(Flatten())
        model.add(Dense(dense_size))
        model.add(Activation('relu'))
        model.add(Dropout(dropout))

        model.add(Dense(num_classes))
        model.add(Activation('softmax'))

    elif modelIndex == 2:
        # 2x Conv+relu+MaxPool+Dropout -> 1x Dense+relu+Dropout
        model.add(
            Conv3D(filters, (filter_size, filter_size, filter_size),
                   padding='same',
                   input_shape=input_shape))
        model.add(Activation('relu'))

        model.add(MaxPooling3D(pool_size=(pool_size, pool_size, pool_size)))
        model.add(Dropout(dropout))

        model.add(
            Conv3D(pool_size * filters,
                   (filter_size, filter_size, filter_size),
                   padding='same'))
        model.add(Activation('relu'))

        model.add(MaxPooling3D(pool_size=(pool_size, pool_size, pool_size)))
        model.add(Dropout(dropout))

        model.add(Flatten())
        model.add(Dense(dense_size))
        model.add(Activation('relu'))
        model.add(Dropout(dropout))

        model.add(Dense(num_classes))
        model.add(Activation('softmax'))

    elif modelIndex == 3:
        # 2x Conv+relu+MaxPool+Dropout -> 2x Dense+relu+Dropout
        model.add(
            Conv3D(filters, (filter_size, filter_size, filter_size),
                   padding='same',
                   input_shape=input_shape))
        model.add(Activation('relu'))

        model.add(MaxPooling3D(pool_size=(pool_size, pool_size, pool_size)))
        model.add(Dropout(dropout))

        model.add(
            Conv3D(pool_size * filters,
                   (filter_size, filter_size, filter_size),
                   padding='same'))
        model.add(Activation('relu'))

        model.add(MaxPooling3D(pool_size=(pool_size, pool_size, pool_size)))
        model.add(Dropout(dropout))

        model.add(Flatten())
        model.add(Dense(dense_size))
        model.add(Activation('relu'))
        model.add(Dropout(dropout))

        model.add(Dense(dense_size))
        model.add(Activation('relu'))
        model.add(Dropout(dropout))

        model.add(Dense(num_classes))
        model.add(Activation('softmax'))

    elif modelIndex == 4:
        # 2x Conv+relu+Conv+relu+MaxPool+Droput -> 1x Dense+relu+Dropout
        model.add(
            Conv3D(filters, (filter_size, filter_size, filter_size),
                   padding='same',
                   input_shape=input_shape))
        model.add(Activation('relu'))

        model.add(Conv3D(filters, (filter_size, filter_size, filter_size)))
        model.add(Activation('relu'))

        model.add(MaxPooling3D(pool_size=(pool_size, pool_size, pool_size)))
        model.add(Dropout(dropout))

        model.add(
            Conv3D(pool_size * filters,
                   (filter_size, filter_size, filter_size),
                   padding='same'))
        model.add(Activation('relu'))

        model.add(
            Conv3D(pool_size * filters,
                   (filter_size, filter_size, filter_size)))
        model.add(Activation('relu'))

        model.add(MaxPooling3D(pool_size=(pool_size, pool_size, pool_size)))
        model.add(Dropout(dropout))

        model.add(Flatten())
        model.add(Dense(dense_size))
        model.add(Activation('relu'))
        model.add(Dropout(dropout))

        model.add(Dense(num_classes))
        model.add(Activation('softmax'))

    elif modelIndex == 5:
        # NOTE(review): identical to the modelIndex == 4 branch above.
        model.add(
            Conv3D(filters, (filter_size, filter_size, filter_size),
                   padding='same',
                   input_shape=input_shape))
        model.add(Activation('relu'))

        model.add(Conv3D(filters, (filter_size, filter_size, filter_size)))
        model.add(Activation('relu'))

        model.add(MaxPooling3D(pool_size=(pool_size, pool_size, pool_size)))
        model.add(Dropout(dropout))

        model.add(
            Conv3D(pool_size * filters,
                   (filter_size, filter_size, filter_size),
                   padding='same'))
        model.add(Activation('relu'))

        model.add(
            Conv3D(pool_size * filters,
                   (filter_size, filter_size, filter_size)))
        model.add(Activation('relu'))

        model.add(MaxPooling3D(pool_size=(pool_size, pool_size, pool_size)))
        model.add(Dropout(dropout))

        model.add(Flatten())
        model.add(Dense(dense_size))
        model.add(Activation('relu'))
        model.add(Dropout(dropout))

        model.add(Dense(num_classes))
        model.add(Activation('softmax'))

    # initiate RMSprop optimizer
    # NOTE(review): this actually creates Adam, not RMSprop -- the two
    # comments here look stale (copied from a Keras example); confirm intent.
    opt = keras.optimizers.Adam(lr=lr, decay=decay)

    # Let's train the model using RMSprop
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    if datagen_settings:
        datagen_train = ImageGenerator(**datagen_settings)
        datagen_test = ImageGenerator(**datagen_settings)
    else:
        datagen_train = None
        datagen_test = None

    early_stopping = EarlyStopping(monitor='val_loss', patience=5)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3)
    dg_batch_size = 16
    print('In loop - {0}'.format(i))
    if datagen_train:
        print('Using data augmentation.')
        history = model.fit_generator(
            datagen_train.flow(x_train, y_train, batch_size=dg_batch_size),
            steps_per_epoch=len(x_train) / dg_batch_size,
            validation_data=datagen_test.flow(x_test,
                                              y_test,
                                              batch_size=dg_batch_size),
            validation_steps=len(x_test) / dg_batch_size,
            epochs=epochs,
            callbacks=[early_stopping, reduce_lr])
    else:
        print('Not using data augmentation.')
        history = model.fit(x_train,
                            y_train,
                            batch_size=batch_size,
                            epochs=epochs,
                            validation_split=0.25,
                            shuffle=True,
                            callbacks=[early_stopping, reduce_lr])

    # Best validation accuracy seen over the whole training run.
    val_acc = max(np.array(history.history['val_acc']))
    print('Max Validation Accuracy - ' + str(val_acc))

    # Record the trial outcome alongside the sampled hyperparameters.
    data = (UTC_local, val_acc, modelIndex, filters, filter_size, pool_size,
            dense_size, dropout, lr, decay)
    res_dict[i] = data
コード例 #11
0
            if not collides: break
        else: continue
        
        KD.addNode(q_next, alpha_new)
        plot_steps((*q_near.node, q_near.alpha), (*q_next, alpha_new), dist, plotter)

        goal_distance = math.hypot(q_next[0]-goal[0], q_next[1]-goal[1]) 
        collides = check_collision(obstacles, (*q_next, alpha_new), goal, goal_distance)
        if not collides:
            plot_steps((*q_next, alpha_new), goal, goal_distance, plotter)
            plotter.draw_rectangle(gen_rect_pts(*goal), facecolor='red', edgecolor='k')
            break

        trials = 0

    print("n =", KD.length)

if __name__ == '__main__':
    from ImageGenerator import ImageGenerator
    from utilities      import get_obstacle_course, get_start_and_goal

    # Load the obstacle course; start/goal are hand-picked (x, y, theta) poses.
    obstacles   = get_obstacle_course("world_obstacles.txt")
    start, goal = (75., 50., 0.), (482.,577.,math.pi/2)

    # Draw the world before the planner overlays its search tree on it.
    plotter    = ImageGenerator()
    plotter.draw_obstacle_course(obstacles)
    plotter.draw_start_and_goal(start,goal)

    # 2000 is presumably the planner's iteration/node budget -- TODO confirm.
    run(obstacles, start, goal, 2000, plotter)

    input("Press enter to exit : ")
コード例 #12
0
    def test (images, labels, pathModel, nnArchitecture, nnClassCount, nnIsTrained, trBatchSize, transResize, transCrop, launchTimeStamp):
        """Run ten-crop test-time inference with a trained DenseNet.

        Loads the checkpoint at ``pathModel`` into the selected DenseNet
        variant, evaluates ``images``/``labels`` through a Resize + TenCrop
        transform pipeline, and returns per-sample predictions averaged over
        the ten crops.

        :return: torch.FloatTensor of predictions on the GPU (outPRED).

        Changes: the dataloader loop variable ``input`` shadowed the builtin
        of the same name (renamed to ``batch_input``); dead commented-out
        code (outGT accumulation, AUROC reporting block) removed.
        """
        CLASS_NAMES = [ 'A', 'B', 'C', 'D', 'E', '']
        
        cudnn.benchmark = True
        
        #-------------------- SETTINGS: NETWORK ARCHITECTURE, MODEL LOAD
        if nnArchitecture == 'DENSE-NET-121': model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169': model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201': model = DenseNet201(nnClassCount, nnIsTrained).cuda()
        
        model = torch.nn.DataParallel(model).cuda() 
        
        modelCheckpoint = torch.load(pathModel)
        model.load_state_dict(modelCheckpoint['state_dict'])

        #-------------------- SETTINGS: DATA TRANSFORMS, TEN CROPS
        normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        
        #-------------------- SETTINGS: DATASET BUILDERS
        # Stack the ten crops into one tensor, then normalize each crop.
        transformList = []
        transformList.append(transforms.Resize(transResize))
        transformList.append(transforms.TenCrop(transCrop))
        transformList.append(transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])))
        transformList.append(transforms.Lambda(lambda crops: torch.stack([normalize(crop) for crop in crops])))
        transformSequence=transforms.Compose(transformList)
        
        datasetTest = ImageGenerator(images=images, labels=labels, transform=transformSequence)
        dataLoaderTest = DataLoader(dataset=datasetTest, batch_size=trBatchSize, num_workers=8, shuffle=False, pin_memory=True)

        outGT = torch.FloatTensor().cuda()
        outPRED = torch.FloatTensor().cuda()
       
        model.eval()
        
        for i, (batch_input, target) in enumerate(dataLoaderTest):
            
            # Fold the crop dimension into the batch for a single forward pass.
            bs, n_crops, c, h, w = batch_input.size()
            
            # NOTE(review): ``volatile=True`` is deprecated since PyTorch 0.4;
            # the modern equivalent is wrapping this loop in torch.no_grad().
            varInput = torch.autograd.Variable(batch_input.view(-1, c, h, w).cuda(), volatile=True)
            
            out = model(varInput)
            # Average the ten crop predictions back down to one row per sample.
            outMean = out.view(bs, n_crops, -1).mean(1)
            
            outPRED = torch.cat((outPRED, outMean.data), 0)

        return outPRED
コード例 #13
0
from CNN import CNN
from DataManager import DataManager
from ImageGenerator import ImageGenerator
from LSTM import LSTM

batch_size = 200
epochs = 30

dm = DataManager()

# Fit the image augmenter on the training data and build an augmented flow.
ig = ImageGenerator()
ig.fit(dm.x_train)
flow = ig.flow(dm.x_train, dm.y_train, batch_size)
# Train the CNN both without and with augmentation for comparison.
CNN(dm).toAlg().run(batch_size, epochs, 'cnn_standard')
CNN(dm).toAlg().run_with_flow(flow, batch_size, epochs, 'cnn_IG')

LSTM(dm).toAlg().run(batch_size, epochs, 'lstm')
# LSTM(dm).toAlg().run_with_flow(flow, batch_size, epochs, 'lstm_IG')
コード例 #14
0
# Paths and model/training hyperparameters for the character recogniser.
weight_path = "../resources/weight/"
path = "../resources/data/"
train_mat = "../resources/data/trainCharBound.mat"
char2id_path = "../resources/char2id.plk"
log_path = "../resources/log/"
max_string_length = 10  # maximum label length fed to the model -- TODO confirm
width = 128  # input image width in pixels
height = 32  # input image height in pixels
char_num = 37  # vocabulary size -- presumably 26 letters + 10 digits + blank; confirm
epochs = 150
channel_num = 1  # single-channel (grayscale) input
# Drop any existing Keras/TF graph state before building a new model.
K.clear_session()

# NOTE(review): ``batch_size`` is defined elsewhere in this file.
img_gen = ImageGenerator(char2id_path,
                         train_mat,
                         path,
                         batch_size_=batch_size,
                         max_string_length=max_string_length)
# Checkpoint only the best weights (by validation loss) and log to TensorBoard.
callbacks = [
    keras.callbacks.ModelCheckpoint(
        weight_path + "ep_{epoch}_loss_{loss:0.3f}_val_{val_loss:0.3f}.h5",
        monitor="val_loss",
        save_weights_only=True,
        save_best_only=True),
    keras.callbacks.TensorBoard(log_path,
                                batch_size=batch_size,
                                write_graph=True)
]

my_model = get_model(width,
                     height,
コード例 #15
0
 def send_tables(self):
     """Render the standings table image and send it to all players.

     Fix: a single ImageGenerator instance is now reused for both the
     generate and retrieve steps instead of constructing the class twice
     (the companion show_cards code uses the same single-instance pattern).
     """
     img_gen = ImageGenerator(1)
     img_gen.generate_table(self.current_slag, self.players, self.teams)
     table_file = img_gen.get_output('table').strip()
     self.send_to(self.players, table_file, img=True)