Example #1
async def monitor_live(timestamp: int):
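    # Read the YAML config, pull out the room-monitor section, and keep
    # only the room ids whose live-status check returns true.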
    with open('./bilibili.yaml', 'rb') as f:
        config = yaml.safe_load(f)
    room_monitor_config = process_config(config['room_monitor'], 'room')
    live_room_ids = [
        room_id for room_id in room_monitor_config['room_ids']
        if await room_status(room_id)
    ]
    return {'live': live_room_ids}
Example #2
def main():
    """Runs the main deep learning pipeline."""
    try:
        args = get_args()
        config = process_config(args.config)
    except Exception:
        print('Missing or invalid arguments.')
        exit(0)

    weights_path = get_best_weights(config)

    print('Create the model.')
    model = DeepSwipeModel(config)

    print('Loading weights.')
    model.model.load_weights(weights_path)

    print('Opening VideoObject')

    cv2.namedWindow("Preview")
    cap = cv2.VideoCapture(0)

    crop_size = 224

    ACTIVE_LEN = 10
    ACTIVE_WIDTH = crop_size
    ACTIVE_HEIGHT = crop_size

    active_frames = np.zeros((ACTIVE_LEN, ACTIVE_HEIGHT, ACTIVE_WIDTH, 3))

    FRAME_WIDTH = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    FRAME_HEIGHT = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    while True:
        rval, frame = cap.read()  # read in frame
        if not rval or frame is None:
            continue  # skip failed reads

        frame = crop_frame(frame, FRAME_WIDTH, FRAME_HEIGHT,
                           crop_size)  # crop frame
        cv2.imshow("Preview", frame)  # show the cropped frame
        frame_reshaped = np.expand_dims(frame, axis=0)  # add a leading frame axis

        active_frames = np.concatenate((active_frames, frame_reshaped),
                                       axis=0)  # append newest frame
        active_frames = active_frames[1:, :, :, :]  # drop oldest frame

        now = datetime.datetime.now()
        input_video = np.expand_dims(active_frames, axis=0)  # add batch_size=1 dimension
        pred = model.model.predict(input_video)

        print(str(now), " | ", "Prediction: ", str(pred))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()  # prevents error in [AVCaptureDeviceInput initWithDevice:error:]
    cv2.destroyAllWindows()
Example #3
def test_full():
    """ Take a config, process video, upload, process in Cloud, download response, process result

    Returns:

    """
    config, _config_parser = process_config()
    ga = google_speech_api(**config)
    mute_list, transcript = ga.process_speech(config.uri)
Example #4
    def test_color_encoder(self):
        config, _, _ = process_config('../input_params.json')
        colors_hsv = config['colors']
        color = 'blue'
        self.assertAlmostEqual(np.sin(2 * np.pi * colors_hsv[color][0] / 360),
                               -0.8660254037844384)
        self.assertAlmostEqual(np.cos(2 * np.pi * colors_hsv[color][0] / 360),
                               -0.5000000000000004)
        self.assertListEqual(colors_hsv[color], [240, 1, 1])
Example #5
def test_resume_op():
    """ Download from a previously started operation

    Returns:

    """
    config, _config_parser = process_config()
    ga = google_speech_api(**config)
    ga.resume_operation(config.load_operation_path)
Example #6
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    except Exception:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([
        config['result_dir'], config['checkpoint_dir'],
        config['checkpoint_dir_lstm']
    ])
    # save the config in a txt file
    save_config(config)
    sess_centralized = tf.Session(config=tf.ConfigProto())
    data = DataGenerator(config)
    model_vae = VAEmodel(config, "Centralized")
    model_vae.load(sess_centralized)
    trainer_vae = vaeTrainer(sess_centralized, model_vae, data, config)
    # here you train your model
    if config['TRAIN_VAE']:
        if config['vae_epochs_per_comm_round'] > 0:
            trainer_vae.train()

    if config['TRAIN_LSTM']:
        # create a lstm model class instance
        lstm_model = lstmKerasModel("Centralized", config)

        # produce the embedding of all sequences for training of lstm model
        # process the windows in sequence to get their VAE embeddings
        lstm_model.produce_embeddings(model_vae, data, sess_centralized)

        # Create a basic model instance
        lstm_nn_model = lstm_model.lstm_nn_model
        lstm_nn_model.summary()  # Display the model's architecture
        # checkpoint path
        checkpoint_path = lstm_model.config['checkpoint_dir_lstm']\
                                        + "cp_{}.ckpt".format(lstm_model.name)
        # Create a callback that saves the model's weights
        cp_callback = tf.keras.callbacks.ModelCheckpoint(
            filepath=checkpoint_path, save_weights_only=True, verbose=1)
        # load weights if possible
        # lstm_model.load_model(lstm_nn_model, config, checkpoint_path)

        # start training
        if config['lstm_epochs_per_comm_round'] > 0:
            lstm_model.train(lstm_nn_model, cp_callback)

    sess_centralized.close()
Example #7
def main():
    """Runs the main deep learning pipeline."""
    try:
        args = get_args()
        config = process_config(args.config)
    except Exception:
        print('Missing or invalid arguments.')
        exit(0)

    print('Create experiment directories.')
    create_dirs([
        config.callbacks.tensorboard_log_dir, config.callbacks.checkpoint_dir
    ])

    # TODO Refactor this
    print('Create partitions and labels.')
    partition = {}
    all_ids = [
        filename.split('.')[0] for filename in os.listdir('data')
        if filename.endswith('.npy')
    ]
    partition['train'] = all_ids[50:]
    partition['validation'] = all_ids[:50]

    labels_ids = [
        filename.split('.')[0] for filename in os.listdir('data')
        if filename.endswith('.npy')
    ]
    labels_values = [
        1 if 'swipe_positive_right' in filename
        else -1 if 'swipe_positive_left' in filename
        else 0
        for filename in os.listdir('data') if filename.endswith('.npy')
    ]
    labels = dict(zip(labels_ids, labels_values))

    print('Create the training and validation data generators.')
    training_generator = DeepSwipeDataGenerator(config, partition['train'],
                                                labels)
    validation_generator = DeepSwipeDataGenerator(config,
                                                  partition['validation'],
                                                  labels)
    data_generator = (training_generator, validation_generator)

    print('Create the model.')
    model = DeepSwipeModel(config)

    print('Create the trainer')
    trainer = DeepSwipeTrainer(model.model, data_generator, config)

    print('Start training the model.')
    trainer.train()
Example #8
def train(train_file, valid_file, test_file, output_file):
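    # Load the base config, then point it at this run's data files.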

    config = process_config('config.json')

    config.train_file = train_file
    config.valid_file = valid_file
    config.test_file = test_file
    config.out_file = output_file

    agent = NLPAgent(config)

    #agent.validate()
    agent.train()

    agent.validate()
Example #9
    def test_encoders(self):
        config, _, _ = process_config('../input_params.json')
        color_encoder = LabelEncoder()
        color_encoder.fit(list(config['colors'].keys()))

        harmony_encoder = LabelEncoder()
        harmony_encoder.fit(config['harmonies'])

        color = 'blue'
        color_enc = color_encoder.transform([color])
        self.assertListEqual(color_enc.tolist(), [1])
        color_onehot = to_categorical(color_enc,
                                      num_classes=len(color_encoder.classes_))
        self.assertListEqual(color_onehot.tolist(),
                             [[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])

        color = 'green-cyan'
        color_enc = color_encoder.transform([color])
        self.assertListEqual(color_enc.tolist(), [6])
        color_onehot = to_categorical(color_enc,
                                      num_classes=len(color_encoder.classes_))
        self.assertListEqual(color_onehot.tolist(),
                             [[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]])

        color = 'yellow'
        color_enc = color_encoder.transform([color])
        self.assertListEqual(color_enc.tolist(), [12])
        color_onehot = to_categorical(color_enc,
                                      num_classes=len(color_encoder.classes_))
        self.assertListEqual(color_onehot.tolist(),
                             [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])

        harmony = 'analogous'
        harmony_enc = harmony_encoder.transform([harmony])
        self.assertListEqual(harmony_enc.tolist(), [0])
        harmony_onehot = to_categorical(harmony_enc,
                                        num_classes=len(
                                            harmony_encoder.classes_))
        self.assertListEqual(harmony_onehot.tolist(), [[1, 0, 0, 0, 0, 0]])

        harmony = 'triadic'
        harmony_enc = harmony_encoder.transform([harmony])
        self.assertListEqual(harmony_enc.tolist(), [5])
        harmony_onehot = to_categorical(harmony_enc,
                                        num_classes=len(
                                            harmony_encoder.classes_))
        self.assertListEqual(harmony_onehot.tolist(), [[0, 0, 0, 0, 0, 1]])
Example #10
def main():
    parser = OptionParser()
    parser.add_option("-c",
                      "--conf",
                      dest="configure",
                      help="configure filename")
    options, _ = parser.parse_args()
    if options.configure:
        conf_file = str(options.configure)
    else:
        print('please specify --conf configure filename')
        exit(-1)

    trainset_params, testset_params, net_params, solver_params = process_config(
        conf_file)

    trainset = TrainSet(trainset_params['data_path'],
                        trainset_params['sample'])

    net_params['entity_num'] = trainset.entity_num
    net_params['relation_num'] = trainset.relation_num
    net_params['batch_size'] = trainset.record_num // int(
        net_params['nbatches'])
    model = TransAllModel(net_params)
    model.build_graph()

    pretrain = 'pre'
    if ('pretrain_model' not in solver_params
            or solver_params['pretrain_model'] == ''):
        pretrain = 'nop'
    if ('save_fld' not in testset_params
            or testset_params['save_fld'] == ''):
        testset_params[
            'save_fld'] = 'models/TransAll_v2_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(
                trainset_params['data_path'].split('/')[-1],
                trainset_params['sample'], net_params['embed_size_e'],
                net_params['margin'], net_params['learning_rate'],
                net_params['nbatches'], net_params['normed'],
                net_params['activation'], net_params['opt'], pretrain)
    print(testset_params['save_fld'])
    testset_params['dataset'] = trainset_params['data_path'].split('/')[-1]
    testset = TestSet(trainset_params['data_path'])
    os.environ['CUDA_VISIBLE_DEVICES'] = solver_params['gpu_id']
    testset_params['batch_size'] = net_params['batch_size']
    testset_params['activation'] = net_params['activation']
    test_model(model, testset, testset_params)
Example #11
def process_batch_list(batch_list, config, video_path):
    global UTILIZATION
    if video_path:
        if batch_list:
            print("Video path specified, ignoring batch list")
        batch_list = [video_path]
    else:
        with Path(batch_list).open("r") as f:
            batch_list = (f.read().strip()
                          .replace("'", "").replace('"', "")
                          .split("\n"))
    print(f"Batch {batch_list}")
    UTILIZATION = load_utilization()
    print(f"Utilization for {MONTH}: {UTILIZATION[MONTH]}")
    #UTILIZATION[MONTH] = 480*60

    # load list
    for item in batch_list:
        # Skip commented ones
        if not item.strip() or item.strip().startswith("#"):
            print(f"skipping {item}")
            continue
        if UTILIZATION[MONTH] > MAX_UTILIZATION:
            warnings.warn("MAX utilization reached")
            break
        print(f"Checking if {item} exists...")
        item = search_folder_for_video(item)
        if item:
            print("Working on ", Path(item).name)
            config, _config_parser = utils.process_config(opts.config,
                                                          video_path=item)
            total_length = utils.get_length(
                item,
                Path(config.ffmpeg_path).parent / "ffprobe")
            total_length = round(total_length + 7.49, 15)
            if total_length > 1200:  # should be longer than 20 minutes
                success = process_item(config, _config_parser)
                if success:
                    UTILIZATION[MONTH] += total_length
            else:
                print(item, "less than 1000 seconds, skipping", total_length)
        else:
            print(item, "not found")

    print("New Utilization", MONTH, UTILIZATION[MONTH])
    save_utilization(UTILIZATION)
Example #12
def test_load_response():
    """ Load a saved response and parse for muting

    Returns:

    """
    # Load old response
    config, _config_parser = process_config(
        video_path=r"J:\Media\Videos\Movies\General\Argo (2012) [Unknown] [R]\Argo (2012) [Unknown] [R].mp4"
    )
    ga = google_speech_api(**config)

    response = ga.load_response(
        ROOT /
        "data/google_api/Margin.Call.2011.1080p.BluRay.x265_2021-01-28 23;17;57.response"
    )
    mute_list, transcript = ga.create_mute_list_from_response(response)
    print(response.results)
    print(mute_list)
Example #13
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    except Exception:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([config['result_dir'], config['checkpoint_dir'], config['checkpoint_dir_lstm']])
    # save the config in a txt file
    save_config(config)
    # create tensorflow session
    sessions = []
    data = []
    model_vaes = []
    vae_trainers = []
    lstm_models = []
    model_vae_global = VAEmodel(config, "Global")
    sess_global = tf.Session(config=tf.ConfigProto())
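    # build a session, data generator, VAE model, trainer, and LSTM per client (1-9)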
    for i in range(1, 10):
        sess = tf.Session(config=tf.ConfigProto())
        sessions.append(sess)
        data.append(generator_fl(config, i))
        model_vaes.append(VAEmodel(config, "Client{}".format(i)))
        model_vaes[-1].load(sessions[-1])
        vae_trainers.append(vaeTrainer(sessions[-1], model_vaes[-1], data[-1], config))
        lstm_models.append(lstmKerasModel("Client{}".format(i), config))
    model_vae_global.load(sess_global)
    trainer_vae_global = vaeTrainer(sess_global, model_vae_global, data[0], config)
    lstm_model_global = lstmKerasModel("Global", config)
    client_weights = [0.1] * 8
    client_weights.append(0.2)
    aggregator = Aggregator(vae_trainers, trainer_vae_global, lstm_models, lstm_model_global, config, client_weights)
    aggregator.aggregate_vae()
    aggregator.aggregate_lstm()
Example #14
    def test_encoder(self):
        config, _, _ = process_config('../input_params.json')
        color_encoder = LabelEncoder()
        color_encoder.fit(config['colors'])

        harmony_encoder = LabelEncoder()
        harmony_encoder.fit(config['harmonies'])

        color = 'orange'
        color_enc = color_encoder.transform([color])
        self.assertListEqual(color_enc.tolist(), [9])
        color_onehot = to_categorical(color_enc,
                                      num_classes=len(color_encoder.classes_))
        self.assertListEqual(color_onehot.tolist(),
                             [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]])

        harmony = 'analogous'
        harmony_enc = harmony_encoder.transform([harmony])
        self.assertListEqual(harmony_enc.tolist(), [0])
        harmony_onehot = to_categorical(harmony_enc,
                                        num_classes=len(
                                            harmony_encoder.classes_))
        self.assertListEqual(harmony_onehot.tolist(), [[1, 0, 0, 0, 0, 0]])
Example #15
async def monitor_dynamic(timestamp: int):
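    # Read the YAML config, then collect dynamics the watched users have
    # posted since `timestamp`.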
    with open('./bilibili.yaml', 'rb') as f:
        config = yaml.safe_load(f)
    dynamic_monitor_config = process_config(config['user_monitor'], 'dynamic')
    new_dynamics = await user_new_dynamic(dynamic_monitor_config['user_ids'],
                                          timestamp)
    return {'dynamic': new_dynamics}
Example #16
def main(config_path: str):
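    # End-to-end run: parse the config, build train/test datasets,
    # compile VGG11, and hand everything to the trainer.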
    config = process_config(config_path)
    train_dataset, test_dataset = create_dataset(config)
    vgg11 = VGG11(config).compile_model()
    trainer = VGG11Trainer(vgg11, train_dataset, test_dataset, config)
    trainer.train()
Example #17
                    default=None,
                    type=str,
                    help='whether to predict video')
parser.add_argument('--video_save',
                    dest='video_save',
                    default=None,
                    type=str,
                    help='whether to save video predict result')

args = parser.parse_args()

if __name__ == '__main__':
    print('--Parsing Config File')

    modeldir = args.model_dir
    configfile = os.path.join(modeldir, args.config_file)
    modelfile = os.path.join(modeldir, args.model_file)
    print(modelfile)

    params = process_config(configfile)
    model = Inference(params=params, model=modelfile)
    predict = PredictAll(model=model, resize=args.resize, hm=args.hm)

    if args.image_file is not None:
        # single image prediction
        predict.predict_image(args.image_file)
    elif args.camera is not None:
        predict.predict_camera(args.camera)
    elif args.video is not None:
        predict.predict_video(args.video, args.video_save)
Example #18
def main():
    parser = OptionParser()
    parser.add_option("-c",
                      "--conf",
                      dest="configure",
                      help="configure filename")
    options, _ = parser.parse_args()
    if options.configure:
        conf_file = str(options.configure)
    else:
        print('please specify --conf configure filename')
        exit(-1)

    trainset_params, testset_params, net_params, solver_params = process_config(
        conf_file)

    #trainset = TrainSet(trainset_params['data_path'], trainset_params['sample'], asym=True)
    trainset = TrainSet(trainset_params['data_path'],
                        trainset_params['sample'])

    net_params['entity_num'] = trainset.entity_num
    net_params['relation_num'] = trainset.relation_num
    net_params['batch_size'] = trainset.record_num // int(
        net_params['nbatches'])

    if solver_params['phase'] == 'train':
        model = TransEModel(net_params)
        model.build_graph()
        os.environ['CUDA_VISIBLE_DEVICES'] = solver_params['gpu_id']
        batch_gen = trainset.batch_gen(net_params['batch_size'])

        if ('pretrain_model' not in solver_params
                or solver_params['pretrain_model'] == ''):
            solver_params['pretrain_model'] = None

        if 'save_fld' not in solver_params:
            solver_params[
                'save_fld'] = 'models/TransE_{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(
                    trainset_params['data_path'].split('/')[-1],
                    trainset_params['sample'], net_params['embed_size'],
                    net_params['margin'], net_params['learning_rate'],
                    net_params['nbatches'], net_params['normed'],
                    net_params['dorc'], net_params['opt'])
        elif solver_params['save_fld'] == '':
            solver_params['save_fld'] = None
        print(solver_params['save_fld'])

        if 'summary_fld' not in solver_params:
            solver_params[
                'summary_fld'] = 'graphs/TransE_{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(
                    trainset_params['data_path'].split('/')[-1],
                    trainset_params['sample'], net_params['embed_size'],
                    net_params['margin'], net_params['learning_rate'],
                    net_params['nbatches'], net_params['normed'],
                    net_params['dorc'], net_params['opt'])
        elif solver_params['summary_fld'] == '':
            solver_params['summary_fld'] = None

        solver_params['dorc'] = net_params['dorc']
        train_model(model, batch_gen, solver_params)

        if solver_params['save_fld']:
            testset_params['save_fld'] = solver_params['save_fld']
            testset_params['start'] = 1
            testset_params['end'] = 1
            testset_params['interval'] = solver_params['max_iter']
            testset_params['dataset'] = trainset_params['data_path'].split(
                '/')[-1]
            testset = TestSet(trainset_params['data_path'], 'test')
            testset_params['batch_size'] = net_params['batch_size']
            if testset_params['testtype'] == 'link':
                test_model_link(model, testset, testset_params)
            elif testset_params['testtype'] == 'trip':
                raise ValueError('Wait to finish.')
            else:
                raise ValueError('Undefined testtype.')
    elif solver_params['phase'] == 'val':
        raise ValueError('Wait to finish.')
    elif solver_params['phase'] == 'test':
        #models = TransEModel(net_params)
        #models.build_graph()
        #os.environ['CUDA_VISIBLE_DEVICES'] = '3'
        models = []
        for i in range(4):
            with tf.device('/gpu:%d' % i):
                models.append(TransEModel(net_params))
                models[i].build_graph()
                tf.get_variable_scope().reuse_variables()
        if ('save_fld' not in testset_params
                or testset_params['save_fld'] == ''):
            testset_params[
                'save_fld'] = 'models/TransE_{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(
                    trainset_params['data_path'].split('/')[-1],
                    trainset_params['sample'], net_params['embed_size'],
                    net_params['margin'], net_params['learning_rate'],
                    net_params['nbatches'], net_params['normed'],
                    net_params['dorc'], net_params['opt'])

        print(testset_params['save_fld'])
        testset_params['dataset'] = trainset_params['data_path'].split('/')[-1]
        testset = TestSet(trainset_params['data_path'], 'test')
        #testset = TestSet(trainset_params['data_path'], 'train')
        testset_params['batch_size'] = net_params['batch_size']
        if testset_params['testtype'] == 'link':
            test_model_link(models, testset, testset_params)
        elif testset_params['testtype'] == 'trip':
            raise ValueError('Wait to finish.')
        else:
            raise ValueError('Undefined testtype.')
    else:
        raise ValueError('Undefined phase.')
Example #19
from utils import process_config, get_args
from data_loader import DataLoader
from model import UnetModel
from trainer import UnetTrainer
from logger import Logger
import tensorflow as tf

if __name__ == '__main__':

    args = get_args()
    config = process_config(args.config)

    data = DataLoader(config)
    model = UnetModel(config)

    with tf.Session() as sess:
        logger = Logger(sess, config)
        trainer = UnetTrainer(sess, model, data, config, logger)
        trainer.train()

        # trainer.validate()
Example #20
    last_time = int(time.time())
    print("last_test_time:"+datetime.datetime.now().isoformat())


async def dynamic_repost():
    global last_time
    status = await monitor_dynamic(last_time)
    new_dynamics: list[int] = status['dynamic']

    if new_dynamics:
        for dynamic in new_dynamics:
            for group_id in dynamic_monitor_config[dynamic.user_id]['group_ids']:
                print("OK_dynamic")
                await bot.send({'group_id': group_id}, '新的动态' + '\n' + dynamic.content + '\n' + dynamic.url)

    last_time = int(time.time())
    print("last_test_time:"+datetime.datetime.now().isoformat())


if __name__ == "__main__":
    with open('./bilibili.yaml', 'rb') as f:
        config = yaml.safe_load(f)

    dynamic_monitor_config = process_config(config['user_monitor'], 'dynamic')
    room_monitor_config = process_config(config['room_monitor'], 'room')

    sched.add_job(dynamic_repost, 'interval', seconds=30)
    sched.add_job(live_repost, 'interval', seconds=30)
    sched.start()

    bot.run(host='127.0.0.1', port=8080)
Example #21
test_dataset = SBM_dataset(n_graphs=200,
                           n_nodes=args.n_nodes,
                           n_communities=args.n_communities,
                           p=args.p,
                           q=args.q)

# print(train_dataset.output_overlap())
# print(test_dataset.output_overlap())

ones = torch.ones(args.n_nodes // K)
y_list = [
    torch.cat([x * ones for x in p]).long().to(dev)
    for p in permutations(range(K))
]

config = process_config(args.model)

params_dict = {
    'model': config.model,
    'input_dim': 1,
    'input_channel': config.input_channel,
    'hid_dim': config.hid_dim,
    'output_dim': config.hid_dim,
    'num_classes': args.n_communities,
    'output_channel': config.output_channel,
    'num_hops': config.num_hops,  # for models other than swl-gnn
    "nhop_gcn": config.nhop_gcn,  # for swl
    "nhop_gin": config.nhop_gin,  # for swl
    "nhop_min_triangle": config.nhop_min_triangle,  # for swl
    "nhop_motif_triangle": config.nhop_motif_triangle,  # for swl
    "stack_op": config.stack_op,  # for swl
import argparse
import os
import scipy.misc
import numpy as np
import math
from utils import process_config
from model import Singleout_net

from dataprovider import data_provider
import cv2
import tensorflow as tf
cfg = process_config('exp6//config.cfg')
gene = data_provider(cfg)
Color_list = [(220, 20, 60), (255, 0, 255), (138, 43, 226), (0, 0, 255),
              (240, 248, 255), (0, 255, 255), (0, 255, 127), (0, 255, 0),
              (255, 255, 0), (255, 165, 0), (255, 69, 0), (128, 0, 0),
              (255, 255, 255), (188, 143, 143)]
Color_name = [
    'Crimson', 'Magenta', 'BlueViolet', 'Blue', 'AliceBlue', 'Cyan',
    'MediumSpringGreen', 'Lime', 'Yellow', 'Orange', 'OrangeRed', 'Maroon',
    'White', 'RosyBrown'
]


def sample_vector(nums, length):
    # evenly spaced unit direction vectors around the circle
    direction_vectors = []
    frac = 2 * np.pi / nums
    for i in range(nums):
        direction_vectors.append(
            np.array(
                [math.cos(frac * i), math.sin(frac * i)], dtype=np.float32))
    return direction_vectors
Example #23
import random
import time

import os
# os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"   # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"]="0"

import tensorflow as tf
from data_loader import DataGenerator
from models import VAEmodel, lstmKerasModel
from trainers import vaeTrainer

from utils import process_config, create_dirs, get_args

# load VAE model
config = process_config('NAB_config.json')
# create the experiments dirs
create_dirs([config['result_dir'], config['checkpoint_dir']])
# create tensorflow session
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# create your data generator
data = DataGenerator(config)
# create the VAE model
model_vae = VAEmodel(config)
# create the VAE trainer
trainer_vae = vaeTrainer(sess, model_vae, data, config)
model_vae.load(sess)

# here you train your model
if config['TRAIN_VAE']:
    if config['num_epochs_vae'] > 0:
Example #24
import cv2
import os
import numpy as np
from matplotlib import pyplot as plt
import scipy.misc
from random import randint
from utils import process_config
cfg = process_config('config.cfg')

# path= "D:\\dataset\\deepworm\\BBBC010_v1_foreground_eachworm\\BBBC010_v1_foreground_eachworm"
# files =os.listdir(path)
# f_name = lambda f:os.path.join(path,f)
# files=files[1:]
# contours=[]
# rects=[]


# for i,it in enumerate(files):
# img=cv2.imread(f_name(it),0)
# (_,cnts, hier) = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
# if len(cnts)==1:
# (x, y, w, h) = cv2.boundingRect(cnts[0])
# contours.append(np.squeeze(cnts[0], axis=1))
# rects.append((x, y, w, h,x+w/2,y+h/2))
# rects = np.array(rects)
# np.savez('worm_data.npz',cnts = contours,rects = rects)
class data_provider():
    def __init__(self, cfg):
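        # load pre-extracted contours/bounding boxes (cf. the commented-out
        # preprocessing above)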
        data_dict = np.load(cfg['dataset_path'])
        self.cfg = cfg
        self.contours = data_dict['cnts']
Example #25
import sys
sys.path.append('./')
from optparse import OptionParser
from solver import Solver

from utils import process_config

parser = OptionParser()
parser.add_option("-c", "--conf", dest="configure",  
                  help="configure filename")
(options, args) = parser.parse_args() 
if options.configure:
  conf_file = str(options.configure)
else:
  print('please specify --conf configure filename')
  exit(0)

common_params, dataset_params, net_params, solver_params = process_config(conf_file)

solver = Solver(True, common_params, solver_params, net_params, dataset_params)
#print("还是哦风格")
solver.train_model()
Example #26

    if config.use_gpu:
        model.cuda()

    if config.forward_only is False:
        try:
            engine.train(model, train_feed, test_feed, config)
        except KeyboardInterrupt:
            print("Training stopped by keyboard.")

    # config.batch_size = 10
    model.load_state_dict(torch.load(model_file))
    engine.inference(model, test_feed, config, num_batch=None)

    if config.output_vis:
        with open(dump_file_train, "wb") as gen_f:
            gen_utils.generate_with_act(model, train_feed, config, num_batch=None, dest_f=gen_f)
        with open(dump_file_test, "wb") as gen_f:
            gen_utils.generate_with_act(model, test_feed, config, num_batch=None, dest_f=gen_f)

    # if config.output_mask:
    #     with open(dump_file_valid, "wb") as gen_f:
    #         gen_utils.generate_with_mask(model, valid_feed, config, num_batch=None, dest_f=gen_f)


if __name__ == "__main__":
    config, unparsed = get_config()
    config = process_config(config)
    main(config)
Example #27
def main():
    parser = OptionParser()
    parser.add_option("-c", "--conf", dest="configure", help="configure filename")
    options, _ = parser.parse_args() 
    if options.configure:
        conf_file = str(options.configure)
    else:
        print('please specify --conf configure filename')
        exit(-1)
  
    trainset_params, testset_params, net_params, solver_params = process_config(conf_file)
    
    trainset = TrainSet(trainset_params['data_path'], trainset_params['sample'])
    
    net_params['entity_num'] = trainset.entity_num
    net_params['relation_num'] = trainset.relation_num
    net_params['batch_size'] = trainset.record_num // int(net_params['nbatches'])
    model = ConvModel(net_params)
    model.build_graph()
    
    os.environ['CUDA_VISIBLE_DEVICES'] = solver_params['gpu_id']
    if solver_params['phase'] == 'train':
        batch_gen = trainset.batch_gen(net_params['batch_size'])
    
        if ('pretrain_model' not in solver_params
                or solver_params['pretrain_model'] == ''):
            solver_params['pretrain_model'] = None

        if 'save_fld' not in solver_params:
            solver_params['save_fld'] = 'models/Conv_v1_{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(
                                        trainset_params['data_path'].split('/')[-1], 
                                        trainset_params['sample'],
                                        net_params['embed_size'],
                                        net_params['activation'],
                                        net_params['channel'],
                                        net_params['learning_rate'],
                                        net_params['nbatches'],
                                        net_params['normed'],
                                        net_params['opt'])
        elif solver_params['save_fld'] == '':                               
            solver_params['save_fld'] = None
        print(solver_params['save_fld'])

        if 'summary_fld' not in solver_params:
            solver_params['summary_fld']='graphs/Conv_v1_{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(
                                        trainset_params['data_path'].split('/')[-1], 
                                        trainset_params['sample'],
                                        net_params['embed_size'],
                                        net_params['activation'],
                                        net_params['channel'],
                                        net_params['learning_rate'],
                                        net_params['nbatches'],
                                        net_params['normed'],
                                        net_params['opt'])
        elif solver_params['summary_fld'] == '':
            solver_params['summary_fld'] = None

        train_model(model, batch_gen, solver_params)
        
        if solver_params['save_fld']:
            testset_params['save_fld'] = solver_params['save_fld']
            testset_params['start'] = 1
            testset_params['end'] = 1
            testset_params['interval'] = solver_params['max_iter']
            testset_params['dataset'] = trainset_params['data_path'].split('/')[-1]
            testset = TestSet(trainset_params['data_path'], 'test')
            testset_params['batch_size'] = net_params['batch_size']
            if testset_params['testtype'] == 'link':
                test_model_link(model, testset, testset_params)
            elif testset_params['testtype'] == 'trip':
                raise ValueError('Wait to finish.')
            else:
                raise ValueError('Undefined testtype.')
    elif solver_params['phase'] == 'val':
        raise ValueError('Wait to finish.')
    elif solver_params['phase'] == 'test':
        if 'save_fld' not in testset_params or testset_params['save_fld'] == '':
            testset_params['save_fld'] = 'models/Conv_v1_{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(
                                        trainset_params['data_path'].split('/')[-1], 
                                        trainset_params['sample'],
                                        net_params['embed_size'],
                                        net_params['activation'],
                                        net_params['channel'],
                                        net_params['learning_rate'],
                                        net_params['nbatches'],
                                        net_params['normed'],
                                        net_params['opt'])
        
        print(testset_params['save_fld'])
        testset_params['dataset'] = trainset_params['data_path'].split('/')[-1]
        testset = TestSet(trainset_params['data_path'], 'test')
        #testset = TestSet(trainset_params['data_path'], 'train')
        testset_params['batch_size'] = net_params['batch_size']
        if testset_params['testtype'] == 'link':
            test_model_link(model, testset, testset_params)
        elif testset_params['testtype'] == 'trip':
            raise ValueError('Wait to finish.')
        else:
            raise ValueError('Undefined testtype.')
    else:
        raise ValueError('Undefined phase.')
Example #28
import SimpleITK as sitk
from myshow import myshow, myshow3d
import matplotlib.pyplot as plt
import cv2
import numpy as np
from utils import (process_config, frame_factory, frame_diff, thresh_otsu,
                   concatenate, resize_and_gray, show_img, open_op,
                   PIL_filter, plt_show, shape_filter)
from ipywidgets import interact, FloatSlider
params = process_config('..\\config.cfg')
frames = frame_factory(params)
fgbg = cv2.createBackgroundSubtractorMOG2()
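# warm the MOG2 background model up on the first 200 frames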
for i in range(200):
    img, gray = resize_and_gray(frames[i], True)
    #gray=filter_img.filter(gray)
    fgmask = fgbg.apply(gray)
    #absdiff=cv2.absdiff(gray,fgbg.getBackgroundImage())
    #cv2.imshow('frame',concatenate(img,fgmask))
    #cv2.imshow('bs',np.concatenate([gray,absdiff],axis=1))
    #k = cv2.waitKey(100)
    #if k == 27:
    #    break
    #else:
    #    continue
#cv2.destroyAllWindows()
img, gray = resize_and_gray(frames[200], True)
fgmask = fgbg.apply(gray)
plt_show(fgmask)
plt_show(cv2.absdiff(gray, fgbg.getBackgroundImage()))
absdiff = cv2.absdiff(gray, fgbg.getBackgroundImage())
ret, bg = cv2.threshold(fgmask, 126, 255, cv2.THRESH_BINARY)
ret, fg = cv2.threshold(fgmask, 128, 255, cv2.THRESH_BINARY)
Example #29
import tensorflow as tf

from data_loader import DataGenerator
from models import VAEmodel, lstmKerasModel
from trainers import vaeTrainer

from utils import process_config, create_dirs, get_args
from tensorflow.python.client import device_lib


def get_available_gpus():
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']


print(get_available_gpus())

# load VAE model
config = process_config('PX4_config.json')
# create the experiments dirs
create_dirs([config['result_dir'], config['checkpoint_dir']])
# create tensorflow session
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# create your data generator
data = DataGenerator(config)
# create the VAE model
model_vae = VAEmodel(config)
# create the VAE trainer
trainer_vae = vaeTrainer(sess, model_vae, data, config)
model_vae.load(sess)

# here you train your model
if config['TRAIN_VAE']:
    if config['num_epochs_vae'] > 0:
Example #30
                    dest='configfile',
                    default='config/config_dlib.cfg',
                    type=str,
                    help='config file name')
parser.add_argument('--loadmodel',
                    dest='loadmodel',
                    default=None,
                    type=str,
                    help='model name of continuing training')
args = parser.parse_args()
print(args.configfile)
print(args.loadmodel)

if __name__ == '__main__':
    print('--Parsing Config File')
    params = process_config(args.configfile)
    os.system('mkdir -p {}'.format(params['saver_directory']))
    os.system('cp {0} {1}'.format(args.configfile, params['saver_directory']))

    print('--Creating Dataset')
    dataset = DataGenerator(params['num_joints'], params['img_directory'],
                            params['training_txt_file'], params['img_size'])
    dataset._create_train_table()
    dataset._randomize()
    dataset._create_sets()

    model = HourglassModel(params=params, dataset=dataset, training=True)
    model.generate_model(load=args.loadmodel)
    # model.restore('trained/tiny_200/hourglass_tiny_200_200')
    model.train_model()