Code Example #1
def main():
    # Initialize the key objects: environment and frame preprocessor
    # (Environment, SimplePreprocessor, NeuralNetAgent, play_generation and
    # num_actions are defined elsewhere in the project)
    env = Environment("127.0.0.1", 8765, debug=False)

    preprocessor = SimplePreprocessor()

    # Evaluate a generation of five small feed-forward agents
    # play(agent, env, preprocessor)  # single-agent variant from the original
    agents = [NeuralNetAgent(num_actions, input_size=4, hidden_size=5)
              for _ in range(5)]
    play_generation(agents, env, preprocessor)
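In examples #1 and #6, SimplePreprocessor is constructed with no arguments and used through process / get_initial_state / get_updated_state, i.e. a game-frame preprocessor rather than the image resizer of the later examples. A minimal sketch of that interface; the grayscale conversion, output resolution, and four-frame history are assumptions inferred from the call sites, not the project's actual code:

import cv2
import numpy as np

class SimplePreprocessor:
    def __init__(self, size=(80, 80), history=4):
        self.size = size          # assumed output resolution
        self.history = history    # assumed number of stacked frames
        self.state = None

    def process(self, frame):
        # Grayscale, downscale, and normalize the raw frame to [0, 1]
        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        return cv2.resize(gray, self.size).astype("float32") / 255.0

    def get_initial_state(self, frame):
        # Seed the frame history with copies of the first processed frame
        self.state = np.stack([frame] * self.history, axis=0)
        return self.state.flatten()

    def get_updated_state(self, frame):
        # Slide the window: drop the oldest frame, append the newest
        self.state = np.concatenate([self.state[1:], frame[np.newaxis]],
                                    axis=0)
        return self.state.flatten()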
Code Example #2
    def __init__(self, field=0, strip=True, lowercase=True):

        # Zero-indexed label vocabulary plus a whitespace/case normalizer
        v = Vocab(zero_indexing=True)
        pp = SimplePreprocessor(strip=strip, lowercase=lowercase)

        super(LabelReader, self).__init__(field, pp, v)

        self.register_data("data_")
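Here SimplePreprocessor is a token normalizer rather than an image class. A plausible sketch of what the strip/lowercase options do (the preprocess method name is an assumption):

class SimplePreprocessor:
    def __init__(self, strip=True, lowercase=True):
        self.strip = strip
        self.lowercase = lowercase

    def preprocess(self, token):
        # Normalize whitespace and case before vocabulary lookup
        if self.strip:
            token = token.strip()
        if self.lowercase:
            token = token.lower()
        return token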
Code Example #3
File: discrete_feature_reader.py  Project: kedz/ntg
    def __init__(self, field=0, missing_token="MISSING"):

        # One-indexed vocabulary with a reserved token for missing feature values
        v = Vocab(zero_indexing=False, special_tokens=[missing_token])
        pp = SimplePreprocessor(strip=True, lowercase=False)

        super(DiscreteFeatureReader, self).__init__(field, pp, v)

        self.register_data("data_")
Code Example #4
File: train_transfer.py  Project: bioxfu/DeepLearning
def train(finetune_depth):
    pre_train_models = config.TRAIN_MODELS
    image_size = config.IMAGES_SIZE
    batch_size = config.BATCH_SIZE
    output_path = config.OUTPUT_PATH
    learning_rate_shallow = config.LEARNING_RATE_SHALLOW
    learning_rate_deep = config.LEARNING_RATE_DEEP
    epochs_shallow = config.EPCHO_SHALLOW
    epochs_deep = config.EPCHO_DEEP
    # construct the training image generator for data augmentation
    aug = image_augment()

    # load the RGB means for the training set
    means = json.loads(open(config.DATASET_MEAN).read())

    for pre_train_model in pre_train_models:

        saved_model = os.path.sep.join(
            [output_path, '{}_model.hdf5'.format(pre_train_model)])

        # initialize the image preprocessors
        sp = SimplePreprocessor(image_size[pre_train_model],
                                image_size[pre_train_model])
        pp = PatchPreprocessor(image_size[pre_train_model],
                               image_size[pre_train_model])
        mp = MeanPreprocessor(means['R'], means['G'], means['B'])
        itap = ImageToArrayPreprocessor()

        # initialize the training and validation dataset generators
        trainGen = HDF5DatasetGenerator(config.TRAIN_HDF5,
                                        batchSize=batch_size,
                                        aug=aug,
                                        preprocessors=[pp, mp, itap],
                                        classes=config.NUM_CLASSES)
        valGen = HDF5DatasetGenerator(config.VAL_HDF5,
                                      batchSize=batch_size,
                                      preprocessors=[sp, mp, itap],
                                      classes=config.NUM_CLASSES)

        if finetune_depth == 'shallow':
            finetune_shallow(pre_train_model, trainGen, valGen, batch_size,
                             output_path, saved_model, learning_rate_shallow,
                             epochs_shallow)
        elif finetune_depth == 'deep':
            finetune_deep(pre_train_model, trainGen, valGen, batch_size,
                          output_path, saved_model, learning_rate_deep,
                          epochs_deep)

        trainGen.close()
        valGen.close()
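In the image pipelines (examples #4, #5 and #7 through #10), SimplePreprocessor is a fixed-size resizer applied before mean subtraction and array conversion. A minimal sketch, assuming an OpenCV resize that ignores the aspect ratio:

import cv2

class SimplePreprocessor:
    def __init__(self, width, height, inter=cv2.INTER_AREA):
        # Target spatial dimensions and interpolation method
        self.width = width
        self.height = height
        self.inter = inter

    def preprocess(self, image):
        # Resize to a fixed size, discarding the original aspect ratio
        return cv2.resize(image, (self.width, self.height),
                          interpolation=self.inter)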
Code Example #5
File: train_custom.py  Project: bioxfu/DeepLearning
def train(model_exist):
    train_models = config.TRAIN_MODELS
    image_size = config.IMAGES_SIZE
    batch_size = config.BATCH_SIZE
    output_path = config.OUTPUT_PATH
    learning_rate = config.LEARNING_RATE
    epochs = config.EPCHO
    # construct the training image generator for data augmentation
    aug = image_augment()

    # load the RGB means for the training set
    means = json.loads(open(config.DATASET_MEAN).read())

    for train_model in train_models:

        saved_model = os.path.sep.join(
            [output_path, '{}_model.hdf5'.format(train_model)])

        # initialize the image preprocessors
        sp = SimplePreprocessor(image_size[train_model],
                                image_size[train_model])
        pp = PatchPreprocessor(image_size[train_model],
                               image_size[train_model])
        mp = MeanPreprocessor(means['R'], means['G'], means['B'])
        itap = ImageToArrayPreprocessor()

        # initialize the training and validation dataset generators
        trainGen = HDF5DatasetGenerator(config.TRAIN_HDF5,
                                        batchSize=batch_size,
                                        aug=aug,
                                        preprocessors=[pp, mp, itap],
                                        classes=config.NUM_CLASSES)
        valGen = HDF5DatasetGenerator(config.VAL_HDF5,
                                      batchSize=batch_size,
                                      preprocessors=[sp, mp, itap],
                                      classes=config.NUM_CLASSES)

        custom_model(train_model, trainGen, valGen, batch_size, output_path,
                     saved_model, learning_rate, epochs, model_exist)

        trainGen.close()
        valGen.close()
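The MeanPreprocessor used by both training scripts subtracts the per-channel RGB means loaded from config.DATASET_MEAN. A sketch of that step, assuming OpenCV's BGR channel ordering:

import cv2

class MeanPreprocessor:
    def __init__(self, rMean, gMean, bMean):
        # Per-channel means computed over the training set
        self.rMean = rMean
        self.gMean = gMean
        self.bMean = bMean

    def preprocess(self, image):
        # Split into channels, subtract the means, and re-merge
        (B, G, R) = cv2.split(image.astype("float32"))
        return cv2.merge([B - self.bMean, G - self.gMean, R - self.rMean])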
Code Example #6
import os
import subprocess
import time

import neat
import numpy as np

# Environment, get_port and SimplePreprocessor come from the project's own modules
def eval_genome(genome, config):
    net = neat.nn.FeedForwardNetwork.create(genome[1], config)
    fitness = 0.0

    # Each worker gets its own port offset so games can run in parallel
    _id = get_port()
    print(_id)

    env = Environment("127.0.0.1", 8765 + _id, debug=False)
    time.sleep(2)

    # Launch a browser window pointing at this worker's game server
    this_env = os.environ.copy()
    # this_env["DISPLAY"] = ":1"  # uncomment to target a specific X display
    subprocess.Popen(
        ["qutebrowser", "--target", "window", f"http://localhost:{8888 + _id}"],
        env=this_env)

    # Alternative browsers kept from the original:
    # subprocess.Popen(["firefox", "-new-window", f"http://localhost:{8888+_id}"], env=this_env)
    # subprocess.Popen(["opera", f"http://localhost:{8888+_id}"], env=this_env)

    preprocessor = SimplePreprocessor()

    time.sleep(10)
    frame, _, crashed = env.start_game()
    frame = preprocessor.process(frame)
    state = preprocessor.get_initial_state(frame)

    while not crashed:
        action = np.argmax(net.activate(state))
        next_frame, reward, crashed = env.do_action(action)
        #print("action: {}".format(env.actions[action]))
        next_frame = preprocessor.process(next_frame)
        next_state = preprocessor.get_updated_state(next_frame)
        fitness += reward
        state = next_state

    env.close()

    time.sleep(2)

    return fitness
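eval_genome receives a (genome_id, genome) pair, which is why it indexes genome[1]. A sketch of how it might be wired into neat-python's standard training loop; the config filename and generation count are hypothetical:

import neat

def eval_genomes(genomes, config):
    # genomes is a list of (genome_id, genome) pairs
    for genome_pair in genomes:
        genome_pair[1].fitness = eval_genome(genome_pair, config)

config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     "neat_config.ini")
population = neat.Population(config)
winner = population.run(eval_genomes, 50)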
Code Example #7
# -*- coding: utf-8 -*-
from configs import data_config
from preprocessor import MeanPreprocessor
from preprocessor import PatchPreprocessor
from preprocessor import CropPreprocessor
from preprocessor import SimplePreprocessor
from preprocessor import ImageToArrayPreprocessor
from HDF5DatasetGenerator import HDF5DatasetGenerator
from keras.models import load_model
import json

# Loading the RGB means for the training set
means = json.loads(open(data_config.DATASET_MEAN).read())

# Initialize the image preprocessors
sp = SimplePreprocessor(227, 227)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
cp = CropPreprocessor(227, 227)
iap = ImageToArrayPreprocessor()

# Load the pretrained network
print("[INFO] Loading model")
model = load_model(data_config.MODEL_PATH)

# Initialize the testing dataset generator, then make predictions on the testing dataset
print("[INFO] Predicting on test dataset...")
testGen = HDF5DatasetGenerator(data_config.TEST_HDF5,
                               64,
                               preprocessors=[sp, mp, iap],
                               classes=2)
# The original snippet is truncated here; a plausible completion, assuming the
# generator exposes the number of test images as numImages
predictions = model.predict_generator(testGen.generator(),
                                      steps=testGen.numImages // 64,
                                      max_queue_size=10)
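cp = CropPreprocessor(227, 227) is initialized above but never used in the truncated snippet. A common follow-up (an assumption, not recovered from the original) is ten-crop test-time augmentation: each test image is cropped ten ways and the predictions are averaged. This needs a generator that yields un-resized images and, in the common HDF5DatasetGenerator implementation, a passes argument to stop after one epoch:

import numpy as np

# Generator applying only mean subtraction, so crops can be taken manually
tta_gen = HDF5DatasetGenerator(data_config.TEST_HDF5, 64,
                               preprocessors=[mp], classes=2)

predictions = []
for (images, labels) in tta_gen.generator(passes=1):
    for image in images:
        # Ten 227x227 crops: four corners, center, and their mirrors
        crops = cp.preprocess(image)
        crops = np.array([iap.preprocess(c) for c in crops],
                         dtype="float32")
        # Average the predictions over the ten crops
        pred = model.predict(crops)
        predictions.append(pred.mean(axis=0))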
Code Example #8
import argparse
import os

import cv2
import imutils
from imutils import paths
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
# SimplePreprocessor, ImageToArrayPreprocessor, SimpleDatasetLoader and
# MiniVGGNet come from the project's own modules (imports truncated here)

ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="Path to input dataset")
args = vars(ap.parse_args())

print("[INFO] loading images...")
imagePaths = list(paths.list_images(args["dataset"]))
classNames = [p.split(os.path.sep)[-2] for p in imagePaths]
classNames = [str(x) for x in np.unique(classNames)]

sp = SimplePreprocessor(64, 64)
iap = ImageToArrayPreprocessor()
sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0

(X_train, X_test, y_train, y_test) = train_test_split(data,
                                                      labels,
                                                      test_size=0.25)

lb = LabelBinarizer()
y_train = lb.fit_transform(y_train)
y_test = lb.transform(y_test)

print("[INFO] building and training model...")
model = MiniVGGNet.build(64, 64, 3, len(classNames))
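The snippet stops right after building the network. A plausible continuation (the optimizer and hyperparameters are assumptions) compiles the model and trains it on the split created above:

from keras.optimizers import SGD

# Categorical cross-entropy over len(classNames) classes; 0.01 is an
# assumed learning rate
opt = SGD(lr=0.01)
model.compile(loss="categorical_crossentropy", optimizer=opt,
              metrics=["accuracy"])

H = model.fit(X_train, y_train, validation_data=(X_test, y_test),
              batch_size=32, epochs=30, verbose=1)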
Code Example #9
File: resize_images.py  Project: bioxfu/DeepLearning
ap.add_argument("-s", "--size", required=True, type=int,
                help="image size")
ap.add_argument("-o", "--output", required=True,
                help="path to output dataset")
args = vars(ap.parse_args())

os.mkdir(args["output"])

# Grab a random sample of images from the dataset
image_paths = np.array(list(paths.list_images(args["dataset"])))
#indexes = np.random.randint(0, len(image_paths), size=(1000,))
#image_paths = image_paths[indexes]
#print(image_paths)

# Initialize the image preprocessors
sp = SimplePreprocessor(args["size"], args["size"])
itap = ImageToArrayPreprocessor()

# Load the dataset and scale the raw pixel intensities to the range [0, 1]
sdl = SimpleImageLoader(preprocessors=[sp, itap])

# initialize the progress bar
widgets = ["[INFO]: Resizing images.... ", progressbar.Percentage(), " ", 
			progressbar.Bar(), " ", progressbar.ETA()]
pbar = progressbar.ProgressBar(maxval=len(image_paths), widgets=widgets).start()


for (i, x) in enumerate(image_paths):
	(data, _, _) = sdl.load([x])
	fname=x.split('/')[-1]
	cv2.imwrite(os.path.sep.join([args["output"], fname]), data[0])
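With the parser setup restored above, the script would be invoked as, for example, python resize_images.py -d dataset -s 64 -o resized, writing one resized copy of every input image into the output directory.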
Code Example #10
File: rank_accuracy.py  Project: bioxfu/DeepLearning
pre_train_model = config.TRAIN_MODELS[0]
image_size = config.IMAGES_SIZE
batch_size = config.BATCH_SIZE
output_path = config.OUTPUT_PATH
saved_model = os.path.sep.join(
    [output_path, '{}_model.hdf5'.format(pre_train_model)])
model_accuracy = open(
    os.path.sep.join(
        [output_path, '{}_model_accuracy'.format(pre_train_model)]), 'w')

# load the RGB means for the training set
means = json.loads(open(config.DATASET_MEAN).read())

# initialize the image preprocessors
sp = SimplePreprocessor(image_size[pre_train_model],
                        image_size[pre_train_model])
cp = CropPreprocessor(image_size[pre_train_model], image_size[pre_train_model])
mp = MeanPreprocessor(means['R'], means['G'], means['B'])
itap = ImageToArrayPreprocessor()

# load the pretrained network
print("[INFO] loading model...")
model = load_model(saved_model)

# initialize the testing dataset generators, then make predictions on
# the testing data
print("[INFO] predicting on test data (no crops)...")
model_accuracy.write("[INFO] predicting on test data (no crops)...\n")
# The snippet is truncated mid-call; the closing argument below follows the
# pattern of examples #4 and #5
testGen = HDF5DatasetGenerator(config.TEST_HDF5,
                               batchSize=batch_size,
                               preprocessors=[sp, mp, itap],
                               classes=config.NUM_CLASSES)
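Given the file name rank_accuracy.py, the truncated remainder presumably computes rank-1 and rank-5 accuracy from the generator's predictions. A sketch of that computation; the helper below is illustrative, not recovered from the project, and assumes integer ground-truth labels:

import numpy as np

def rank5_accuracy(preds, labels):
    rank1 = rank5 = 0
    for (pred, gt) in zip(preds, labels):
        # Class indices sorted by descending probability
        top = np.argsort(pred)[::-1]
        if gt in top[:5]:
            rank5 += 1
        if gt == top[0]:
            rank1 += 1
    total = float(len(preds))
    return (rank1 / total, rank5 / total)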