Example #1
# imports and class wrapper reconstructed; the original snippet showed only __init__
import tkinter

import paths

class FullscreenCanvas(tkinter.Canvas):  # class name is a hypothetical stand-in
    def __init__(self, *args, **kwargs):
        tkinter.Canvas.__init__(self, *args, **kwargs)
        self.imageLeft = paths.get_paths()  # image paths via the project's paths helper
        self.imageRight = []
        self.width = self.winfo_screenwidth()
        self.height = self.winfo_screenheight()
        self.bg = "black"
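
# a minimal usage sketch, assuming the hypothetical FullscreenCanvas name above
if __name__ == "__main__":
    root = tkinter.Tk()
    canvas = FullscreenCanvas(root)
    canvas.pack(fill="both", expand=True)
    root.mainloop()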
Example #2
import os

from natsort import natsorted

import paths  # project helper module providing get_paths

def get_files(dir_path, dirs=True, append_path=True):
    # is_file(...) is another project helper, assumed to be in scope
    d_path = str(dir_path)
    all_in_dir = os.listdir(d_path)
    if dirs:
        # keep only the sub-directories
        files = [f for f in all_in_dir if not is_file(f, dir_path)]
    else:
        # keep only the regular files
        files = [f for f in all_in_dir if is_file(f, dir_path)]
    files = natsorted(files)
    if append_path:
        files = [paths.get_paths(dir_path, file_i) for file_i in files]
    return files
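
# a hedged usage sketch; "/data/experiments" is a made-up path
subdirs = get_files("/data/experiments", dirs=True, append_path=False)  # bare directory names
csv_files = get_files("/data/experiments", dirs=False)  # naturally sorted full paths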
Example #3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import config
import data_generator
import paths

input_paths, context_paths, label_paths = paths.get_paths(
    config.DATA_PATH, config.CONTEXT)

train_paths, valid_paths, test_paths = paths.split_paths(
    input_paths, config.RATIO)

train_gen = data_generator.data_gen(
    train_paths,
    context_paths,
    label_paths,
    context=config.CONTEXT,
    batch_size=config.BATCH_SIZE,
    structure_names=config.STRUCTURE_NAMES,
    resize=config.GRID_SIZE,
)

valid_gen = data_generator.data_gen(
    valid_paths,  # remainder reconstructed to mirror train_gen; the original was truncated here
    context_paths,
    label_paths,
    context=config.CONTEXT,
    batch_size=config.BATCH_SIZE,
    structure_names=config.STRUCTURE_NAMES,
    resize=config.GRID_SIZE,
)
Example #4
# imports assumed by this example (the repo also provides its own dt, ut, models,
# grid, LocationDataLoader, get_paths, plot_gt_locations, train and test helpers)
import os

import numpy as np
import torch
import matplotlib.pyplot as plt

def main():

    # hyper params
    params = {}
    params['dataset'] = 'birdsnap'  # inat_2018, inat_2017, birdsnap, nabirds, yfcc
    if params['dataset'] in ['birdsnap', 'nabirds']:
        params['meta_type'] = 'ebird_meta'  # orig_meta, ebird_meta
    else:
        params['meta_type'] = ''
    params['batch_size'] = 1024
    params['lr'] = 0.001
    params['lr_decay'] = 0.98
    params['num_filts'] = 256  # embedding dimension
    params['num_epochs'] = 30
    params['log_frequency'] = 50
    # to train on GPU instead:
    # params['device'] = 'cuda' if torch.cuda.is_available() else 'cpu'
    params['device'] = 'cpu'

    params['balanced_train_loader'] = True
    params['max_num_exs_per_class'] = 100
    params['map_range'] = (-180, 180, -90, 90)  # assumed (lon_min, lon_max, lat_min, lat_max)

    # specify feature encoding for location and date
    params['use_date_feats'] = False  # if False, the date feature is not used
    params['loc_encode'] = 'encode_cos_sin'  # encode_cos_sin, encode_3D, encode_none
    params['date_encode'] = 'encode_cos_sin'  # encode_cos_sin, encode_none

    # specify loss type
    # appending '_user' models the user location and object affinity - see losses.py
    params['train_loss'] = 'full_loss_user'  # full_loss_user, full_loss

    ############# add new parameters #########################
    params['spa_enc_type'] = 'theory'  # the type of space encoder
    params['frequency_num'] = 64  # number of frequencies used in the space encoder
    params['max_radius'] = 360  # maximum spatial context radius in the space encoder
    params['min_radius'] = 0.0005  # minimum spatial context radius in the space encoder
    params['spa_f_act'] = "relu"  # final activation function of the spatial relation encoder
    params['freq_init'] = "geometric"  # frequency list initialization method
    params['spa_enc_use_postmat'] = True  # whether to use a post matrix in spa_enc
    params['num_rbf_anchor_pts'] = 200  # number of RBF anchor points in the "rbf" space encoder
    params['rbf_kernal_size'] = 1  # RBF kernel size in the "rbf" space encoder

    params['num_hidden_layer'] = 1  # number of hidden layers in the (global) space encoder's feedforward NN
    params['hidden_dim'] = 512  # hidden dimension of the feedforward NN
    params['use_layn'] = True  # whether to use layer normalization in the feedforward NN
    params['skip_connection'] = True  # whether to use skip connections in the feedforward NN
    params['dropout'] = 0.5  # dropout rate in the feedforward NN

    ##########################################################
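
    # For orientation only: a minimal sketch (an assumption, not necessarily the
    # repo's exact code behind ut.get_model) of how a "geometric" frequency list is
    # commonly built for this style of encoder, with wavelengths spanning
    # [min_radius, max_radius] geometrically and frequencies as their reciprocals.
    def geometric_freq_list(frequency_num, min_radius, max_radius):
        log_step = np.log(max_radius / min_radius) / (frequency_num - 1)
        timescales = min_radius * np.exp(np.arange(frequency_num) * log_step)
        return 1.0 / timescales  # e.g. geometric_freq_list(64, 0.0005, 360)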

    print('Dataset   \t' + params['dataset'])
    op = dt.load_dataset(params, 'val', True, True)
    # train_locs: np.ndarray, [num_train, 2], location data
    train_locs = op['train_locs']
    # train_classes: np.ndarray, [num_train], image category ids
    train_classes = op['train_classes']
    # train_users: np.ndarray, [num_train], user ids
    train_users = op['train_users']
    # train_dates: np.ndarray, [num_train], dates
    train_dates = op['train_dates']
    val_locs = op['val_locs']
    val_classes = op['val_classes']
    val_users = op['val_users']
    val_dates = op['val_dates']
    class_of_interest = op['class_of_interest']
    classes = op['classes']

    params['num_classes'] = op['num_classes']

    # params['rbf_anchor_pt_ids']: the sample indices in train_locs whose corresponding points are used as RBF anchor points
    if params['spa_enc_type'] == 'rbf':
        params['rbf_anchor_pt_ids'] = list(
            np.random.choice(np.arange(len(train_locs)),
                             params['num_rbf_anchor_pts'],
                             replace=False))
    else:
        params['rbf_anchor_pt_ids'] = None

    if params['meta_type'] == '':
        params['model_file_name'] = "../models/model_{}_{}.pth.tar".format(
            params['dataset'], params['spa_enc_type'])
    else:
        params['model_file_name'] = "../models/model_{}_{}_{}.pth.tar".format(
            params['dataset'], params['meta_type'], params['spa_enc_type'])
    op_dir = "image/ims_{}_{}/".format(params['dataset'],
                                       params['spa_enc_type'])
    if not os.path.isdir(op_dir):
        os.makedirs(op_dir)

    # process users
    # NOTE we are only modelling the users in the train set - not the val
    # un_users: a sorted list of unique user id
    # train_users: the indices in un_users which indicate the original train user id
    un_users, train_users = np.unique(train_users, return_inverse=True)
    train_users = torch.from_numpy(train_users).to(params['device'])
    params['num_users'] = len(un_users)
    if 'user' in params['train_loss']:
        assert params['num_users'] != 1  # need to have more than one user

    # print stats
    print('\nnum_classes\t{}'.format(params['num_classes']))
    print('num train    \t{}'.format(len(train_locs)))
    print('num val      \t{}'.format(len(val_locs)))
    print('train loss   \t' + params['train_loss'])
    print('model name   \t' + params['model_file_name'])
    print('num users    \t{}'.format(params['num_users']))
    if params['meta_type'] != '':
        print('meta data    \t' + params['meta_type'])

    # load ocean mask for plotting
    mask = np.load(get_paths('mask_dir') + 'ocean_mask.npy').astype(int)  # np.int was removed from NumPy

    # data loaders
    train_labels = torch.from_numpy(train_classes).to(params['device'])
    # train_feats: torch.Tensor, shape [num_train, 2] or [num_train, 3]
    train_feats = ut.generate_model_input_feats(
        spa_enc_type=params['spa_enc_type'],
        locs=train_locs,
        dates=train_dates,
        params=params,
        device=params['device'])
    train_dataset = LocationDataLoader(train_feats, train_labels, train_users,
                                       params['num_classes'], True)
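    # note: the balanced sampler below caps each class at 'max_num_exs_per_class'
    # examples per epoch to soften class imbalance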
    if params['balanced_train_loader']:
        train_loader = torch.utils.data.DataLoader(
            train_dataset,
            num_workers=0,
            batch_size=params['batch_size'],
            sampler=ut.BalancedSampler(train_classes.tolist(),
                                       params['max_num_exs_per_class'],
                                       use_replace=False,
                                       multi_label=False),
            shuffle=False)
    else:
        train_loader = torch.utils.data.DataLoader(
            train_dataset,
            num_workers=0,
            batch_size=params['batch_size'],
            shuffle=True)

    val_labels = torch.from_numpy(val_classes).to(params['device'])
    val_feats = ut.generate_model_input_feats(
        spa_enc_type=params['spa_enc_type'],
        locs=val_locs,
        dates=val_dates,
        params=params,
        device=params['device'])
    val_dataset = LocationDataLoader(val_feats, val_labels, val_users,
                                     params['num_classes'], False)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             num_workers=0,
                                             batch_size=params['batch_size'],
                                             shuffle=False)

    # create model
    params['num_feats'] = train_feats.shape[1]
    # model = models.FCNet(num_inputs=params['num_feats'], num_classes=params['num_classes'],
    #                      num_filts=params['num_filts'], num_users=params['num_users']).to(params['device'])
    model = ut.get_model(train_locs=train_locs,
                         params=params,
                         spa_enc_type=params['spa_enc_type'],
                         num_inputs=params['num_feats'],
                         num_classes=params['num_classes'],
                         num_filts=params['num_filts'],
                         num_users=params['num_users'],
                         device=params['device'])
    optimizer = torch.optim.Adam(model.parameters(), lr=params['lr'])

    # set up grid to make dense prediction across world
    gp = grid.GridPredictor(mask, params)

    # plot ground truth
    plt.close('all')
    plot_gt_locations(params, mask, train_classes, class_of_interest, classes,
                      train_locs, train_dates, op_dir)

    # main train loop
    for epoch in range(0, params['num_epochs']):
        print('\nEpoch\t{}'.format(epoch))
        train(model, train_loader, optimizer, epoch, params)
        test(model, val_loader, params)

        # save dense prediction image
        # grid_pred: (1002, 2004)
        grid_pred = gp.dense_prediction(model, class_of_interest)
        op_file_name = op_dir + str(epoch).zfill(4) + '_' + str(
            class_of_interest).zfill(4) + '.jpg'
        plt.imsave(op_file_name, 1 - grid_pred, cmap='afmhot', vmin=0, vmax=1)

    if params['use_date_feats']:
        print('\nGenerating predictions for each month of the year.')
        if not os.path.isdir(op_dir + 'time/'):
            os.makedirs(op_dir + 'time/')
        for ii, tm in enumerate(np.linspace(0, 1, 13)):
            grid_pred = gp.dense_prediction(model, class_of_interest, tm)
            op_file_name = op_dir + 'time/' + str(class_of_interest).zfill(
                4) + '_' + str(ii) + '.jpg'
            plt.imsave(op_file_name,
                       1 - grid_pred,
                       cmap='afmhot',
                       vmin=0,
                       vmax=1)

    # save trained model
    print('Saving output model to ' + params['model_file_name'])
    op_state = {
        'epoch': epoch + 1,
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'params': params
    }
    torch.save(op_state, params['model_file_name'])
Example #5
import os
import sys
import time
from glob import glob
from subprocess import Popen, PIPE

import pytest

import paths
from testosterone.log import log
from testosterone.Platform import Platform
from testosterone.helper import get_no_valgrind, pyv3io_binding
from testosterone.BridgeAdapterSim import BridgeAdapterSim, SyncHandler
from testosterone.recovery_utils import query_node_state_util, recovery_utils_init
from testosterone.data_dog_pytest_report import datadog_report

SRC_DIR = sys.argv[1]
BIN_DIR = sys.argv[2]

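# note: this rebinds the name 'paths' from the imported module to its return value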
paths = paths.get_paths(BIN_DIR, SRC_DIR)
recovery_utils_init(paths)

NODE_CONFIG_CMD = os.path.join(SRC_DIR, paths.node_directory_src_relative_path,
                               'config', 'node_config.json')
NODE_CONFIG_EMERGENCY_SHUTDOWN_CMD = os.path.join(
    SRC_DIR, paths.node_directory_src_relative_path, 'config',
    'node_config_emergency_shutdown.json')
NODE_CONFIG_DEFAULT_CMD = os.path.join(SRC_DIR,
                                       paths.node_directory_src_relative_path,
                                       'config', 'node_config_default.json')
BRIDGE_CONFIG_CMD = os.path.join(SRC_DIR,
                                 paths.bridge_directory_src_relative_path,
                                 'config', 'bridge_config.json')

run_node_valgrind = not get_no_valgrind(BIN_DIR, False)
Example #6
# imports as in Example #4 (os, numpy as np, torch, matplotlib.pyplot as plt,
# plus the repo's own dataset/model helpers)
def main():

    # hyper params
    params = {}
    params['dataset'] = 'inat_2018'  # inat_2018, inat_2017, birdsnap, nabirds, yfcc
    if params['dataset'] in ['birdsnap', 'nabirds']:
        params['meta_type'] = 'ebird_meta'  # orig_meta, ebird_meta
    else:
        params['meta_type'] = ''
    params['batch_size'] = 1024
    params['lr'] = 0.0005
    params['lr_decay'] = 0.98
    params['num_filts'] = 256  # embedding dimension
    params['num_epochs'] = 30
    params['log_frequency'] = 50
    params['device'] = 'cuda' if torch.cuda.is_available() else 'cpu'
    params['balanced_train_loader'] = True
    params['max_num_exs_per_class'] = 100
    params['map_range'] = (-180, 180, -90, 90)

    # specify feature encoding for location and date
    params['use_date_feats'] = True  # if False, the date feature is not used
    params['loc_encode'] = 'encode_cos_sin'  # encode_cos_sin, encode_3D, encode_none
    params['date_encode'] = 'encode_cos_sin'  # encode_cos_sin, encode_none

    # specify loss type
    # appending '_user' models the user location and object affinity - see losses.py
    params['train_loss'] = 'full_loss_user'  # full_loss_user, full_loss

    print('Dataset   \t' + params['dataset'])
    op = dt.load_dataset(params, 'val', True, True)
    train_locs = op['train_locs']
    train_classes = op['train_classes']
    train_users = op['train_users']
    train_dates = op['train_dates']
    val_locs = op['val_locs']
    val_classes = op['val_classes']
    val_users = op['val_users']
    val_dates = op['val_dates']
    class_of_interest = op['class_of_interest']
    classes = op['classes']
    params['num_classes'] = op['num_classes']

    if params['meta_type'] == '':
        params['model_file_name'] = '../models/model_' + params['dataset'] + '.pth.tar'
    else:
        params['model_file_name'] = '../models/model_' + params['dataset'] + '_' + params['meta_type'] + '.pth.tar'
    op_dir = 'ims/ims_' + params['dataset'] + '/'
    if not os.path.isdir(op_dir):
        os.makedirs(op_dir)

    # process users
    # NOTE we are only modelling the users in the train set - not the val
    un_users, train_users = np.unique(train_users, return_inverse=True)
    train_users = torch.from_numpy(train_users).to(params['device'])
    params['num_users'] = len(un_users)
    if 'user' in params['train_loss']:
        assert params['num_users'] != 1  # need to have more than one user

    # print stats
    print('\nnum_classes\t{}'.format(params['num_classes']))
    print('num train    \t{}'.format(len(train_locs)))
    print('num val      \t{}'.format(len(val_locs)))
    print('train loss   \t' + params['train_loss'])
    print('model name   \t' + params['model_file_name'])
    print('num users    \t{}'.format(params['num_users']))
    if params['meta_type'] != '':
        print('meta data    \t' + params['meta_type'])

    # load ocean mask for plotting
    mask = np.load(get_paths('mask_dir') + 'ocean_mask.npy').astype(int)  # np.int was removed from NumPy

    # data loaders
    train_labels = torch.from_numpy(train_classes).to(params['device'])
    train_feats = generate_feats(train_locs, train_dates, params)
    train_dataset = LocationDataLoader(train_feats, train_labels, train_users, params['num_classes'], True)
    if params['balanced_train_loader']:
        train_loader = torch.utils.data.DataLoader(
            train_dataset, num_workers=0, batch_size=params['batch_size'],
            sampler=ut.BalancedSampler(train_classes.tolist(), params['max_num_exs_per_class'],
                                       use_replace=False, multi_label=False),
            shuffle=False)
    else:
        train_loader = torch.utils.data.DataLoader(train_dataset, num_workers=0, batch_size=params['batch_size'], shuffle=True)

    val_labels = torch.from_numpy(val_classes).to(params['device'])
    val_feats = generate_feats(val_locs, val_dates, params)
    val_dataset = LocationDataLoader(val_feats, val_labels, val_users, params['num_classes'], False)
    val_loader = torch.utils.data.DataLoader(val_dataset, num_workers=0, batch_size=params['batch_size'], shuffle=False)

    # create model
    params['num_feats'] = train_feats.shape[1]
    model = models.FCNet(num_inputs=params['num_feats'], num_classes=params['num_classes'],
                         num_filts=params['num_filts'], num_users=params['num_users']).to(params['device'])
    optimizer = torch.optim.Adam(model.parameters(), lr=params['lr'])

    # set up grid to make dense prediction across world
    gp = grid.GridPredictor(mask, params)

    # plot ground truth
    plt.close('all')
    plot_gt_locations(params, mask, train_classes, class_of_interest, classes, train_locs, train_dates, op_dir)

    # main train loop
    for epoch in range(0, params['num_epochs']):
        print('\nEpoch\t{}'.format(epoch))
        train(model, train_loader, optimizer, epoch, params)
        test(model, val_loader, params)

        # save dense prediction image
        grid_pred = gp.dense_prediction(model, class_of_interest)
        op_file_name = op_dir + str(epoch).zfill(4) + '_' + str(class_of_interest).zfill(4) + '.jpg'
        plt.imsave(op_file_name, 1-grid_pred, cmap='afmhot', vmin=0, vmax=1)

    if params['use_date_feats']:
        print('\nGenerating predictions for each month of the year.')
        if not os.path.isdir(op_dir + 'time/'):
            os.makedirs(op_dir + 'time/')
        for ii, tm in enumerate(np.linspace(0, 1, 13)):
            grid_pred = gp.dense_prediction(model, class_of_interest, tm)
            op_file_name = op_dir + 'time/' + str(class_of_interest).zfill(4) + '_' + str(ii) + '.jpg'
            plt.imsave(op_file_name, 1 - grid_pred, cmap='afmhot', vmin=0, vmax=1)

    # save trained model
    print('Saving output model to ' + params['model_file_name'])
    op_state = {'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'params': params}
    torch.save(op_state, params['model_file_name'])