Example 1
def pedestrians(data_root):
    '''Return a list of bounding boxes in the format [frame, bb_id, x, y, dx, dy].'''
    #return [[1,1,617,128,20,50]]

    # If memory allows, you can call the following function instead of training
    # the HoG, LBP, and LUV k-means models separately:
    # train_kmeans(save_path='./Models')

    print('Training k-means HoG...')
    train_kmeans_hog(save_path='./Models')
    print('Training k-means LBP...')
    train_kmeans_lbp(save_path='./Models')
    print('Training k-means LUV...')
    train_kmeans_luv(save_path='./Models')

    print('Building dataset...')
    build_dataset(save_path='./Dataset/', kmeans_path='./Models/')

    print('Training classifier...')
    train_classifier(save_path='./Models', data_path='./Dataset')

    print('Detecting...')
    sol = detect(save_path='./gt',
                 model_path='./Models/',
                 image_path=data_root)

    return sol
Example 2
def callback():
    code = flask.request.args.get('code')
    url = 'https://accounts.spotify.com/api/token'
    body = {
        'grant_type': 'authorization_code',
        'code': code,
        'redirect_uri': 'http://localhost:5000/callback',
        'client_id': client,
        'client_secret': secret
    }

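    # exchange the OAuth authorization code for an access token at Spotify's token endpoint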
    res = requests.post(url=url, data=body)
    token = json.loads(res.text)['access_token']

    bar = playlist.Playlist(id='37i9dQZF1E35k73659EuOH', auth_token=token)
    songs = bar.get_tracks()
    songs_short = songs[:10]

    dataset = build_dataset(
        genre_list=[
            'rap', 'country', 'rock', 'edm', 'folk', 'christian', 'metal',
            'punk'
        ],
        token=token,
    )
    print(dataset)
    return {'set': dataset}
Example 3
def evolution(ModelClass, iterations):
    """Evolution algorithm."""

    # initialize some random models
    models = ModelClass.random_models(10)

    errors = []

    for i in range(iterations):
        errors = []

        for model in models:
            # prepare the data
            x, y = build_dataset(model.input_options,
                                 model.model_options["predict_n"], True)
            # split the data into training set and testing set
            x_train, x_test, y_train, y_test = train_test_split(x,
                                                                y,
                                                                test_size=0.2)
            # train the model
            model.train(x_train, y_train)
            # calculate the model error
            y_predict = model.predict(x_test)
            errors.append(model.error(y_test, y_predict))

        # select top models
        error_idx_sorted = np.argsort(errors)
        top_models = [models[i] for i in error_idx_sorted[:2]]

        # cross-over models and breed new models
        if i < iterations - 1:
            models = ModelClass.evolve(top_models, 10)

    # return the best model
    best_model_idx = np.argmin(errors)
    best_model = models[best_model_idx]
    return best_model
Example 4
d_g_iter = 6  # (D's training epochs / G's training epochs = d_g_iter - 1)
learning_rate = 1e-4
input_dim = 121
method = 'cross-e'
weight = 0.9
degree = 'euclidean'
logdir = create_logdir(mode, method, weight, degree)
save_path = os.path.join(base_dir, logdir)
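# best validation metrics seen so far (presumably updated later in the training loop)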
best_auprc = 0.
best_f1 = 0.

if not os.path.exists(save_path):
    os.makedirs(save_path)

if not os.path.exists('{}_dataset.pkl'.format(mode)):
    build_dataset(mode)

with open('{}_dataset.pkl'.format(mode), 'rb') as f:
    train_set = pickle.load(f)
    val_set = pickle.load(f)
    test_set = pickle.load(f)

x_train, y_train = train_set
x_val, y_val = val_set


def _eval(sess, model, test_data, label):
    ano_scores = []
    for _, batch_test_data in DataInput(test_data, test_batch_size):
        _ano_score, _, _ = model.eval(sess, batch_test_data)
        # Extend
Example 5
		if (len(i.split("_")) <3):
			continue
		image_name = i+"_"+name_base + ".jpg"
		move(image_name, "ForG\\evaluation\\" + image_name)


	print("Building training ...")
	for i in index[1]:
		if (len(i.split("_")) <3):
			continue
		image_name = i+"_"+name_base+".jpg"
		move(image_name, "ForG\\training\\" + image_name)



	build_dataset(TRAIN, TEST, VAL, ORIG_INPUT_DATASET, CLASSES, BASE_PATH)




	rmtree('ForG')


	extract_features(TRAIN, TEST, BASE_PATH, BASE_CSV_PATH, BATCH_SIZE, LE_PATH)
	# train(info = "Result of Right Mic with Feature Extraction - VGG\nRight Mic\nRevised each of the spectrogram\n\t1. with half of overlap.\n\t2. with half of overlap and half of window length\nTotal amount of images 30000")
	train("Result of Right Mic with Feature Extraction - VGG\nRandom choose 500 gap 500 foliage for all 10 datasets \nRevised each of the spectrogram\n\twindow length: longest, overlap: 0\nTotal amount of images 30000", BASE_CSV_PATH, TRAIN, TEST, MODEL_PATH, LE_PATH)

	print("Archiving file ...")
	make_archive(name_base, 'zip', BASE_PATH)

	print("Removing files ...")
Example 6
import os
import sys
import subprocess

import build_dataset
import build_models

if len(sys.argv) <= 1:
    print('No command passed!')
    sys.exit(1)
command = sys.argv[1]


def get_path(p):
    return os.path.normpath(
        os.path.join(os.path.dirname(os.path.abspath(__file__)), p))


if command == 'build-dataset':
    build_dataset.build_dataset()
elif command == 'build-models':
    build_models.build_models()
elif command == 'build':
    build_dataset.build_dataset()
    build_models.build_models()
elif command == 'web':
    p = get_path('./src/ui/web')
    subprocess.call("cd %s && flask run" % p, shell=True)
elif command == 'gui':
    p = get_path('./src/ui/gui/main.py')
    subprocess.call("python " + p, shell=True)
else:
    print('Unrecognized command.')
Example 7
mv_seed = args.mv_seed

for dataset in args.dataset:
    for mv_type in args.mv_type:
        print("Running", dataset, mv_type, mv_seed)

        # build data
        cache_dir = os.path.join(args.cache_dir, dataset, str(mv_seed),
                                 mv_type)
        if not args.real:
            data, info = build_dataset(args.data_dir,
                                       dataset,
                                       val_size=args.cache_val_size,
                                       test_size=args.test_size,
                                       max_size=args.max_size,
                                       mv_prob=args.mv_prob,
                                       mv_type=mv_type,
                                       random_state=mv_seed,
                                       save_dir=cache_dir)
        else:
            data, info = build_real_dataset(args.data_dir,
                                            dataset,
                                            val_size=args.cache_val_size,
                                            test_size=args.test_size,
                                            max_size=args.max_size,
                                            random_state=mv_seed,
                                            save_dir=cache_dir)

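        # repair the dirty training matrix; the repaired versions are presumably cached under X_train_repairs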
        cache_dir = os.path.join(cache_dir, "X_train_repairs")
        data["X_train_repairs"] = repair(data["X_train_dirty"],
Example 8
image_width = 400
image_height = 300
batch_size = 2
num_epochs = 10
learning_rate = 0.0002
beta1 = 0.5
shuffle_buffer = 100  # buffer size used when shuffling the data
ngf = 32
ndf = 64
MODEL_SAVE_PATH = './checkpoints'
MODEL_NAME = 'model.ckpt'

if not os.path.exists(MODEL_SAVE_PATH):
    os.makedirs(MODEL_SAVE_PATH)

data_X = build_dataset(tfrecords_Xpath, image_width, image_height, batch_size, num_epochs, shuffle_buffer)
iterator_X = data_X.make_initializable_iterator()
image_batch_X = iterator_X.get_next()

data_Y = build_dataset(tfrecords_Ypath, image_width, image_height, batch_size, num_epochs, shuffle_buffer)
iterator_Y = data_Y.make_initializable_iterator()
image_batch_Y = iterator_Y.get_next()

G = Generator(ngf, 'G')
F = Generator(ngf, 'F')
D_X = Discriminator(ndf, 'D_X')
D_Y = Discriminator(ndf, 'D_Y')

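# history buffers for the 50 most recently generated batches, presumably used as
# image pools when training the discriminators (a common CycleGAN trick)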
fake_images_X = np.zeros((50, batch_size, image_height, image_width, 3))
fake_images_Y = np.zeros((50, batch_size, image_height, image_width, 3))
Example 9
        for d in [0,1]:
            with open("RIRs_new\\room{0}_position{1}_distance{2}.pkl".format(r,p,d), 'rb') as f:
                _rir = pkl.load(f)
            RIRs = np.concatenate((RIRs,_rir))
            
#%% Merge RIRs - testing
    
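# accumulate the room impulse responses for every room/position/distance combination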
RIRs = np.zeros((0,4),float)
for r in range(0,2):
    for p in range(0,7):
        for d in [0,1]:
            with open("Test\\room{0}_position{1}_distance{2}.pkl".format(r,p,d), 'rb') as f:
                _rir = pkl.load(f)
            RIRs = np.concatenate((RIRs,_rir))

x_valid, y_valid = build_dataset(RIRs, test=1)

with open('x_valid_sr.dat', 'wb') as f:
    f.write(x_valid)
with open('y_valid_sr.dat', 'wb') as f:
    f.write(y_valid)

#%% Merge RIRs - LOCATA
    
RIRs = np.zeros((0,12),float)
for r in range(0,1):
    for p in range(0,7):
        for d in [0]:
            with open("RIRs_LOCATA\\room{0}_position{1}_distance{2}.pkl".format(r,p,d), 'rb') as f:
                _rir = pkl.load(f)
            RIRs = np.concatenate((RIRs,_rir))

x_valid, y_valid = build_dataset(RIRs, rir_len=9600, test=1)
Example 10
def ds_config_to_datasets(ds_config):
    return model_utils.ds_config_to_datasets(
        ds_config, lambda paths: build_dataset(paths).map(tf_to_onehot))
Example 11
#--Create dataset and dataloader----------------------------------------------------------------------------------

#define transform ops
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

transform_img = transforms.Compose([
    transforms.ToTensor(),
    normalize,
])

#create dataset
from build_dataset import build_dataset

dataset = build_dataset(train_stage, image_path, data_path, transform_img,
                        mode, dataset_indicator)

#dataloader
test_loader = torch.utils.data.DataLoader(dataset,
                                          batch_size=BATCH_SIZE,
                                          shuffle=False,
                                          **kwargs)

#--Model----------------------------------------------------------------------------------

from mymodel import build_mymodel
model, _ = build_mymodel(mode, data_path, CUDA, image_size, latent_len, blk_len, num_class, max_seq, num_word,\
                                 None, None, train_stage, [img_net_type, ingre_net_type], None, None)
model.eval()

# --performance----------------------------------------------------------------
Example 12
max_class_label=13

def tf_to_onehot(samples,device_id,_1,_2):
    # It works for a single y
    def to_onehot(x, y, min_y, max_y):
        z = np.zeros([max_y - min_y + 1])
        z[y-min_y] = 1

        return x,z

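    # wrap the NumPy conversion in tf.py_function so it can run inside a tf.data pipeline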
    return tf.py_function(
        lambda x,y: to_onehot(x,y,min_class_label, max_class_label),
        (samples,device_id),
        [tf.float32, tf.int64]
    )


train_paths = [
    '/mnt/lebensraum/Datasets/Day1.Equalized/Device_9/tx_7/converted_576floats.protobin'
]

test_dataset = build_dataset(train_paths)
test_dataset = test_dataset.map(tf_to_onehot)

it = get_data("/mnt/lebensraum/Datasets/Day1.Equalized/Device_9/tx_7/1587959509-4155354-fb.bin", 576)

for x in test_dataset:
    # assert np.array_equal(x[0], next(it))
    print(x[0])

    sys.exit(1)
Example 13
            else:
                print("unrecognized input")
    os.mkdir(out_dir)

    print(tabulate(vars(args).items(), headers=["parameter", "value"]))
    with open(os.path.join(Path(out_dir), "args.txt"), "w") as f:
        print(tabulate(vars(args).items(), headers=["parameter", "value"]),
              file=f)

    t0 = time.time()

    ########## preparing the dataset ##########

    if not args.skip_databuild and not args.resume:
        print(f"\n*** {datetime.now()}: building data set ***")
        build_dataset(args)

    t1 = time.time()

    ########## training the target LM model ##########

    if not args.resume:
        print(f"\n\n*** {datetime.now()}: training target LM model ***\n")
        train_targetmodel(args)

    t2 = time.time()

    ########## training the translation model ##########

    print(f"\n\n*** {datetime.now()}: training translation model ***\n")
    train_translationmodel(args)