def main():
    """Run the representation-learning pipeline end to end.

    Builds network representations of the raw dataset (simple FC or conv,
    per CLI args), then fits and evaluates a linear separator on the
    represented train/test examples.

    Raises:
        ValueError: if ``args.network_type`` is neither 'simple' nor 'conv'.
    """
    args = get_arguments()
    utils.print_args(args)
    ds = data_loading.get_dataset(args.dataset, args.normalize_raw,
                                  args.normalize_reps, args.cifar_path)
    print("Collected arguments and raw dataset.")

    # Guard clause: reject unknown network types up front.
    if args.network_type not in ('simple', 'conv'):
        raise ValueError("Network type {} not supported".format(
            args.network_type))

    # Both branches feed the same rounds call; they differ only in the
    # input shape and whether dimensionality reduction is forwarded.
    if args.network_type == 'simple':
        shape = args.dim_red if args.dim_red is not None \
            else ds.get_raw_input_shape()
        reduction = args.dim_red
    else:
        shape = ds.get_raw_input_shape(True)
        reduction = None
    rounds.add_network_to_vector_rounds(args.rounds, ds, shape, args.neurons,
                                        args.max_iter_optimization,
                                        args.alpha_optimization, args.n_train,
                                        reduction,
                                        network_type=args.network_type)
    print("Finished getting Representations")

    print("Getting represented dataset:")
    print("Getting training examples...")
    x, y = ds.get_training_examples(args.n_train,
                                    dim_reduction=args.dim_red,
                                    print_progress=True)
    print("Getting test examples...")
    x_test, y_test = ds.get_test_examples(args.n_test,
                                          dim_reduction=args.dim_red,
                                          print_progress=True)

    print("Getting final linear separator")
    w = svm.get_linear_separator(x, y, type_of_classifier='sdca', verbose=2,
                                 alpha=args.alpha_evaluation,
                                 max_iter=args.max_iter_evaluation)

    # Report train then test accuracy of the fitted separator.
    performance = evaluation.evaluate_model(w, x, y)
    print("train performance is {}".format(performance))
    performance = evaluation.evaluate_model(w, x_test, y_test)
    print("test performance is {}".format(performance))
def main_nn():
    """Train a neural network (fully connected or convolutional) directly.

    Builds the model selected by CLI args, moves it to the available device,
    fetches raw train/test examples, and runs the training loop.
    """
    args = get_network_training_args()
    utils.print_args(args)
    ds = data_loading.get_dataset(args.dataset, args.normalize_raw, False,
                                  args.cifar_path)

    is_simple = args.network_type == 'simple'
    if is_simple:
        # Input dimension is the reduced one when dim_red is requested.
        in_dim = args.dim_red if args.dim_red is not None \
            else ds.get_raw_input_shape()
        net = FCNetwork(in_dim, args.neurons, args.layers)
    else:
        # Anything other than 'simple' is treated as 'conv'.
        net = ConvNetwork(ds.get_raw_input_shape(True), args.neurons,
                          args.layers, args.kernel_size, auto_pad=True)
    net.to(utils.get_device())
    print("Model device is cuda? {}".format(next(net.parameters()).is_cuda))

    # Dimensionality reduction only applies to the simple (FC) network.
    reduction = args.dim_red if is_simple else None
    x, y = ds.get_training_examples(args.n_train, False, reduction)
    x_test, y_test = ds.get_test_examples(args.n_test, False, reduction)

    train_network(net, x, y, x_test, y_test, args.epochs, args.batch_size,
                  optimizer=args.optimizer, lr=args.learning_rate,
                  weight_decay=args.weight_decay, verbose=args.verbose)
# Script-level setup: load the image training and test sets from disk and
# pull one batch for inspection.
from pathlib import Path
from data_loading import get_dataset

# Root data directory containing the "training_set" and "test_set" subfolders.
dataset_dir = Path("data")
train_dir = dataset_dir / "training_set"
test_dir = dataset_dir / "test_set"

# NOTE(review): this get_dataset takes a directory path; presumably it returns
# an iterable dataset yielding (images, labels) batches — confirm against
# data_loading.
train_ds = get_dataset(train_dir)
test_ds = get_dataset(test_dir)

# Grab a single (image_batch, label_batch) pair from the training iterator.
image_batch, label_batch = next(iter(train_ds))
def get_average_songs_per_user(dataset) -> float:
    """Return the average number of nonzero entries per row.

    With rows as users and columns as songs, this is the mean number of
    songs with a nonzero play record per user.

    Args:
        dataset: sparse user-song matrix exposing ``count_nonzero()`` and
            ``shape`` (presumably a scipy.sparse matrix — TODO confirm).
    """
    return dataset.count_nonzero() / dataset.shape[0]


def get_average_users_per_song(dataset) -> float:
    """Return the average number of nonzero entries per column.

    With rows as users and columns as songs, this is the mean number of
    users with a nonzero play record per song.

    Args:
        dataset: sparse user-song matrix exposing ``count_nonzero()`` and
            ``shape`` (presumably a scipy.sparse matrix — TODO confirm).
    """
    return dataset.count_nonzero() / dataset.shape[1]


start_time = time.time()
dataset = get_dataset(users_count, songs_count, minimum_songs_per_user,
                      minimum_users_per_song, make_binary=True,
                      make_log=False, add_one=False)
# Fraction of nonzero cells in the user-song matrix.
print("Data sparsity:",
      dataset.count_nonzero() / (dataset.shape[0] * dataset.shape[1]))
print("songs_count =", dataset.shape[1])
X_train, X_test = train_test_split(dataset, test_size=test_proportion,
                                   random_state=0)