def classification_preprocess_all_datasets():
    """
    Preprocesses all datasets to be ready for classification task.
    This will include stemming, word correction, lower-casing, hashtag removal, special char removal.
    """
    
    for i in range(0,len(utils.annotated_datasets)):
        tweetlines = utils.get_dataset(utils.annotated_datasets[i])
        tweets = []
        for line in tweetlines:
            if len(line)>1:
                tweets.append(tweet.to_tweet(line))
        
#        tweets = lower_case(tweets)
        tweets = remove_hastags_and_users(tweets)
        tweets = count_emoticons(tweets)
        tweets = replace_links(tweets)
        tweets = remove_specialchars(tweets)
        tweets = correct_words(tweets)
        tweets = stem(tweets)
        tweets = tokenize(tweets)
        tweets = pos_tag(tweets)
        tweets = count_exclamations(tweets)

        analyzer = Analyzer(utils.annotated_datasets[i], tweets)
        stats = analyzer.analyze()
        print stats
        #store tweets in pickles...
        print "Storing pickles..."
        utils.store_pickles(tweets, utils.annotated_datasets[i][24:-4])
Example #2
def preprocess_temporal_dataset():
    tweetlines = utils.get_dataset(utils.complete_datasets[3])
    tweets = []
    for line in tweetlines:
        if len(line) > 1:
            tweets.append(tweet.to_tweet(line))
    tweets = preprocessing.preprocess_tweets(tweets)
    sentiments = lexicon.perform_google_sentiment_lexicon_lookup(tweets)
    with open("temporal_sentiments", "wb") as f:
        pickle.dump(sentiments, f)
    with open("temporal_tweets2", "wb") as f:
        pickle.dump(tweets, f)
def initial_preprocess_all_datasets():
    """
    Runs first preprocessing iteration on all datasets.
    This is the preprocessing routine performed initially on the datasets before annotation.
    This routine includes duplicate removal
    """
        
    for i in range(0,len(utils.datasets)):
        #Fetch from dataset
        tweets = []
        tweetlines = utils.get_dataset(utils.complete_datasets[i])
        for tweetline in tweetlines:
            tweets.append(tweet.to_tweet(tweetline))
            
        #Perform preprocessing
        tweets = remove_duplicates_and_retweets(tweets)

        #Store back to dataset
        tweetlines = []
        for t in tweets:
            tweetlines.append(t.to_tsv())
        utils.store_dataset(tweetlines, utils.datasets[i])
Example #4
def test_supermodel(datasets, args, n_folds=5, times=20, verbose=False):
    for item in datasets:
        print(type(item))
        if isinstance(item, tuple):
            X, y = item[0], item[1]
        else:
            X, y, _ = utils.get_dataset(item)
            print("TESTING %s" % item.upper())
        print("Dataset shape: %s, %s" % (X.shape, y.shape))
        args["dataset_name"] = item
        super_res, classifiers_res = scores_stats(X, y, cv_type="stratified", n_folds=n_folds, times=times, verbose=verbose, args=args)
        print("Classifiers results:")
        max_mean = 0
        max_std = 0
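        # track the best individual classifier by mean CV score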
        for idx, val in enumerate(classifiers_res[0]):
            mean, std = (classifiers_res[0][idx], classifiers_res[1][idx])
            if mean > max_mean:
                max_mean = mean
                max_std = std
            print("Mean = %.4f, std = %.4f" % (mean, std))
        print("Max classifier mean:\nMean = %.4f, std = %.4f" % (max_mean, max_std))
        print("Superclassifier score")
        print("Mean = %.4f, std = %.4f" % (super_res[0], super_res[1]))
            np.savez(os.path.join(save_dir, video_name + '.npz'),
                     avg_score=avg_score.mean(0).cpu().numpy(),
                     weight=weight.mean(0).cpu().numpy()
                     if weight is not None else None,
                     global_score=global_score.mean(0).cpu().numpy(),
                     branch_scores=branch_scores)

    if args.include_train:

        train_dataset_dict = get_dataset(
            dataset_name=dataset_name,
            subset=args.train_subset_name,
            file_paths=file_paths,
            sample_rate=sample_rate,
            base_sample_rate=base_sample_rate,
            action_class_num=action_class_num,
            modality='flow',
            feature_type=feature_type,
            feature_oversample=feature_oversample,
            temporal_aug=False,
        )

        train_detect_dataset = SingleVideoDataset(
            train_dataset_dict, single_label=False,
            random_select=False)  # Single label False!!!

        train_detect_loader = torch.utils.data.DataLoader(train_detect_dataset,
                                                          batch_size=1,
                                                          pin_memory=True,
                                                          shuffle=False)
Example #6
def get_data_loaders(args, tokenizer):
    """ Prepare the dataset for training and evaluation """
    personachat = get_dataset(tokenizer, args.dataset_path, args.dataset_cache)

    logger.info("Build inputs and labels")
    datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
    for dataset_name, dataset in personachat.items():
        num_candidates = len(dataset[0]["utterances"][0]["candidates"])
        if args.num_candidates > 0 and dataset_name == 'train':
            num_candidates = min(args.num_candidates, num_candidates)
        for dialog in dataset:
            persona = dialog["personality"].copy()
            for _ in range(
                    max(1, min(args.personality_permutations, len(persona)))):
                for utterance in dialog["utterances"]:
                    history = utterance["history"][-(2 * args.max_history +
                                                     1):]
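                    # only the last candidate (the gold reply in this data layout) gets lm_labels=True;
                    # mc_labels below records that same index for the multiple-choice head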
                    for j, candidate in enumerate(
                            utterance["candidates"][-num_candidates:]):
                        lm_labels = bool(j == num_candidates - 1)
                        instance = build_input_from_segments(
                            persona, history, candidate, tokenizer, lm_labels)
                        for input_name, input_array in instance.items():
                            datasets[dataset_name][input_name].append(
                                input_array)
                    datasets[dataset_name]["mc_labels"].append(num_candidates -
                                                               1)
                    datasets[dataset_name]["n_candidates"] = num_candidates
                if len(persona) > 1:
                    persona = [persona[-1]
                               ] + persona[:-1]  # permuted personalities

    logger.info("Pad inputs and convert to Tensor")
    tensor_datasets = {"train": [], "valid": []}
    for dataset_name, dataset in datasets.items():
        dataset = pad_dataset(dataset,
                              padding=tokenizer.convert_tokens_to_ids(
                                  SPECIAL_TOKENS[-1]))
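        # reshape every input except mc_labels from flat rows into (n_examples, n_candidates, ...)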
        for input_name in MODEL_INPUTS:
            tensor = torch.tensor(dataset[input_name])
            if input_name != "mc_labels":
                tensor = tensor.view((-1,
                                      datasets[dataset_name]["n_candidates"]) +
                                     tensor.shape[1:])
            tensor_datasets[dataset_name].append(tensor)

    logger.info("Build train and validation dataloaders")
    train_dataset, valid_dataset = TensorDataset(
        *tensor_datasets["train"]), TensorDataset(*tensor_datasets["valid"])
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_dataset) if args.distributed else None
    valid_sampler = torch.utils.data.distributed.DistributedSampler(
        valid_dataset) if args.distributed else None
    train_loader = DataLoader(train_dataset,
                              sampler=train_sampler,
                              batch_size=args.train_batch_size,
                              shuffle=(not args.distributed))
    valid_loader = DataLoader(valid_dataset,
                              sampler=valid_sampler,
                              batch_size=args.valid_batch_size,
                              shuffle=False)

    logger.info("Train dataset (Batch, Candidates, Seq length): {}".format(
        train_dataset.tensors[0].shape))
    logger.info("Valid dataset (Batch, Candidates, Seq length): {}".format(
        valid_dataset.tensors[0].shape))
    return train_loader, valid_loader, train_sampler, valid_sampler

special_chars_removal = r'[<>{}\[\]\-_*"]'

replacement_chars = {u"&": u"og",
                     u"6amp;": u"og",
                     u"+": u"og"}
        
if __name__ == '__main__':
    #Testing
#    tweets = [Tweet("13:37", "johnarne", "Jeg () haaater drittt!!!? :( #justinbieber"), Tweet("13:37", "johnarne", "Jeg eeelsker @erna_solberg http://www.erna.no :) #love #jernerna" )]
#    for tweet in tweets:
#        tweet.set_sentiment("negative")
#        print tweet
    
    tweetlines = utils.get_dataset("test_annotated_data/erna_dataset.tsv")
    tweets = []
    for line in tweetlines:
        if len(line)>1:
            tweets.append(tweet.to_tweet(line))
        
    
#    tweets = lower_case(tweets)
    tweets = remove_hastags_and_users(tweets)
    tweets = count_emoticons(tweets)
    tweets = replace_links(tweets)
    tweets = remove_specialchars(tweets)
    for t in tweets:
        print t
    tweets = correct_words(tweets)
    tweets = stem(tweets)
    tmp_X_test = utils.remove_feature(tmp_X_test, 11)
    tmp_X_test = utils.remove_feature(tmp_X_test, 10)
    tmp_X_test = utils.remove_feature(tmp_X_test, 9)
    tmp_X_test = utils.remove_feature(tmp_X_test, 8)
    tmp_X_test = utils.remove_feature(tmp_X_test, 7)
    tmp_X_test = utils.remove_feature(tmp_X_test, 6)
    # tmp_X_test = utils.remove_feature(tmp_X_test, 5)
    # tmp_X_test = utils.remove_feature(tmp_X_test, 4)
    # tmp_X_test = utils.remove_feature(tmp_X_test, 3)
    # tmp_X_test = utils.remove_feature(tmp_X_test, 2)
    # tmp_X_test = utils.remove_feature(tmp_X_test, 1)
    # tmp_X_test = utils.remove_feature(tmp_X_test, 0)
    return tmp_X_train, tmp_X_test


X, Y = utils.get_dataset('dataset_threshold_100.txt')
# X = preprocessing.normalize(X)
Y = utils.convert_Y_to_class_numbers(Y)
# X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                    Y,
                                                    test_size=0.2,
                                                    random_state=50)
# X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=40)
# X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=30)
# X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=20)
# X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=10)
# X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)

# X_train, X_test = prepare_dataset(X_train, X_test)
# plot_accuracy(X_train, Y_train, X_test, Y_test, KNeighborsClassifier(n_neighbors=5), 'KNN')
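
# Hedged sketch (not in the original script): fit and score the KNN referenced in the
# commented-out plot_accuracy call above, using the train/test split produced earlier.
from sklearn.neighbors import KNeighborsClassifier

knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, Y_train)
print("KNN test accuracy: {:.4f}".format(knn.score(X_test, Y_test)))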
Example #9
def associate(real_world: bool, audio: bool):
    project_dir = os.getcwd()
    cfg = utils.get_config(project_dir)
    bio_type = 'audio' if audio else 'video'
    data_path = os.path.join(
        project_dir, 'data',
        cfg['base_conf']['dataset'][utils.get_dataset(real_world, bio_type)])
    bio_data_path = os.path.join(data_path, cfg['base_conf']['biometric_data'])
    wifi_data_path = os.path.join(data_path, cfg['base_conf']['wifi_data'])
    wifi_thres = cfg['parameters']['wifi_threshold']
    meeting_thres = cfg['parameters']['meeting_threshold']
    cdist_metric = cfg['parameters']['cdist_metric']
    victims_thres = cfg['parameters']['estimated_victims']
    omega = cfg['parameters']['omega']

    # meeting_npy_paths maps each meeting to the .npy file of feature vectors produced by feature_extraction, e.g.:
    # {'meeting_15': 'CrossLeak/data/audio_50vs20_100/bio_data/meeting_15/vecmeeting_15.npy'}}
    meeting_npy_paths = utils.get_meeting_and_path(bio_data_path, r'.+\.npy$')
    # {'meeting_51': 'CrossLeak/data/audio_50vs20_100/bio_data/meeting_51/segsmeeting_51.pk'}
    bio_path = utils.get_meeting_and_path(bio_data_path, r'.+segs.+\.pk$')
    assert len(meeting_npy_paths.keys()) == len(bio_path.keys())

    # get participants' names, given the corresponding WiFi sniffing files (wifi_thres is the cutoff rss threshold)
    meeting_people_name = utils.get_meeting_people_name(
        wifi_data_path, real_world, r'.+\.pk$', wifi_thres)

    # load and map poi name and mac address, respectively
    poi_name_mac = {}
    meeting_poi_name = collections.defaultdict(list)
    if real_world:
        for mac, poi in cfg['mac_name']:
            if poi not in poi_name_mac:
                poi_name_mac[poi] = mac
            else:
                logging.warning(
                    'duplicate mac address of {} with {} and {}'.format(
                        poi, poi_name_mac[poi], mac))
    else:
        poi = pickle.load(open(os.path.join(bio_data_path, 'POIs.pk'), 'rb'))
        non_poi = pickle.load(
            open(os.path.join(bio_data_path, 'nonPOIs.pk'), 'rb'))
        for peo in poi:
            poi_name_mac[peo] = peo
        for meeting, peos in meeting_people_name.items():
            for peo in peos:
                if peo in poi:
                    meeting_poi_name[meeting].append(peo)
                elif peo not in non_poi:
                    logging.warning('{} not in poi nor non_poi'.format(peo))

    # remove filter people
    excluded_peo = cfg['filter_people'][bio_type]
    for meeting, people_names in meeting_poi_name.items():
        meeting_poi_name[meeting] = [
            poi for poi in people_names if poi not in excluded_peo
        ]

    # flatten to get unique poi name
    poi_people = sorted({
        name
        for people_names in meeting_poi_name.values() for name in people_names
    })
    logging.info('poi people: {}'.format(poi_people))

    meeting_name = sorted(meeting_poi_name.keys())
    meeting_num = len(meeting_name)
    if meeting_thres == -1:
        meeting_thres = meeting_num

    # get meeting index by meeting name
    meeting_index = dict(zip(meeting_name, list(range(meeting_num))))
    poi_num = len(poi_people)

    context_infor = np.zeros([meeting_num, poi_num]).astype(np.float64)
    for meeting, people_names in meeting_poi_name.items():
        for poi in people_names:
            context_infor[meeting_index[meeting], poi_people.index(poi)] = 1

    bio_info = np.empty([0, cfg['pre_process'][bio_type]['dimension']])
    bio_paths = []
    for meeting in bio_path.keys():
        with open(bio_path[meeting], 'rb') as f:
            relative_bio_paths = pickle.load(f)
            absolute_bio_paths = [
                os.path.join(bio_data_path, p) for p in relative_bio_paths
            ]
            bio_paths.extend(absolute_bio_paths)
            # iou_vec holds the face/voice feature vectors produced by the feature extractor
            iou_vec = np.load(meeting_npy_paths[meeting])
            try:
                bio_info = np.vstack((bio_info, iou_vec))
            except:
                logging.error('error in numpy vstack: {}'.format(meeting))

    # construct mac attendance vector
    real_mac_attendance = defaultdict(
        lambda: [0] * min(meeting_num, meeting_thres))
    if not real_world:
        # simply use people name to represent MAC address
        meeting_people_mac = utils.get_meeting_people_name(
            wifi_data_path, real_world, r'.+\.pk$', wifi_thres)

    for meeting, macs in meeting_people_mac.items():
        for mac in macs:
            real_mac_attendance[mac][meeting_index[meeting]] = 1

    # use mac_index to get the index of mac address in mac_attendance
    mac_index = {}
    poi_mac_attendance = []
    cnt = 0
    for poi in poi_people:
        if poi in poi_name_mac and poi_name_mac[poi] in real_mac_attendance:
            # split real_mac_attendance into two ordered groups rather than randomly: POI entries first, then non-POI
            poi_mac_attendance.append(real_mac_attendance[poi_name_mac[poi]])
            mac_index[cnt] = poi_name_mac[poi]
            cnt += 1
            real_mac_attendance.pop(poi_name_mac[poi])
        else:
            # every poi should have a name-mac mapping and attend at least one meeting
            logging.error(
                'poi {} has no mac information or did not attend any meeting'
                .format(poi))
    assert len(real_mac_attendance) == 0

    # add the non poi part of mac_attendance
    non_poi_mac_attendance = []
    if real_world:
        for mac, attendance in real_mac_attendance.items():
            mac_index[cnt] = mac
            cnt += 1
            non_poi_mac_attendance.append(attendance)
    else:
        non_poi = pickle.load(
            open(os.path.join(bio_data_path, 'nonPOIs.pk'), 'rb'))
        non_poi_mac_attendance = [[
            0 for _ in range(min(meeting_num, meeting_thres))
        ] for _ in range(len(non_poi))]
        for oos_idx in range(len(non_poi)):
            random_rssi = np.random.normal(-60, 80,
                                           min(meeting_num, meeting_thres))
            for meeting in range(min(meeting_num, meeting_thres)):
                if random_rssi[meeting] >= wifi_thres:
                    non_poi_mac_attendance[oos_idx][meeting] = 1

    mac_attendance = np.concatenate(
        (poi_mac_attendance, non_poi_mac_attendance))

    if real_world:
        # remove always-on mac addresses (detected in over 90% of meetings), which are likely routers or other fixed infrastructure
        always_on_index = np.where(
            mac_attendance.sum(axis=1) > 0.9 * mac_attendance.shape[1])[0]
        mac_attendance = np.delete(mac_attendance, always_on_index, axis=0)
        logging.info(
            'always on mac address no: {} of all meeting no: {}'.format(
                len(always_on_index), mac_attendance.shape[1]))
    logging.info('mac attendance len: {} with poi no: {}'.format(
        len(mac_attendance), len(poi_mac_attendance)))

    # concatenate features and attendance vector (ctx information)
    assert len(bio_paths) == len(bio_info)
    logging.info('bio_paths and bio_info len: {}'.format(len(bio_paths)))

    # get bio_features in valid meeting
    bio_features = []
    for bio_feature, path in zip(bio_info, bio_paths):
        parent = utils.get_parent_folder_name(path, 3)
        c = parent.split('_')
        if meeting_index['%s_%s' % (c[0], c[1])] < meeting_thres:
            bio_features.append(bio_feature)

    # WiFi event vectors: per-POI attendance over the first meeting_thres meetings
    wifi_people_in_meetings = np.zeros(
        [poi_num, min(meeting_num, meeting_thres)])
    for i in range(poi_num):
        for name in meeting_name:
            if meeting_index[name] < meeting_thres:
                wifi_people_in_meetings[i,
                                        meeting_index[name]] = context_infor[
                                            meeting_index[name], i]
    # for i in range(poi_num):
    #     for name in meeting_name:
    #         if poi_people[i] in meeting_poi_name[name]:
    #             if meeting_index[name] < meeting_thres:
    #                 wifi_people_in_meetings[i, meeting_index[name]] = 1

    scan(bio_features, bio_paths, bio_info, real_world, cdist_metric,
         victims_thres, omega, meeting_num, meeting_thres, meeting_index,
         wifi_people_in_meetings, poi_people, project_dir, bio_data_path,
         audio)
Example #10
    flip = args.dataset == "subj"
    keywords = get_extended_keyword(keywords,
                                    vocab=None,
                                    weight=args.weight,
                                    n=args.extension)
    if pseudo:
        if args.extension > 0:
            print("Extended Keywords :", keywords)
        else:
            print("Original Keywords :", keywords)
    else:
        print("ORACLE MODE")

    x_train, x_test, y_test, lp, ln, sc = get_dataset(args.dataset,
                                                      key=keywords,
                                                      threshold=args.threshold,
                                                      pseudo=pseudo,
                                                      flip=flip)
    # check pseudo-labeling method
    tp = len(np.where(lp == 1)[0])
    fp = len(np.where(lp == -1)[0])
    tn = len(np.where(ln == -1)[0])
    fn = len(np.where(ln == 1)[0])
    print("--------------------------------------------")
    print("Result of Pseudo-labeling Algorithm 1")
    print("Pseudo-positive (true-p, false-p) = (%d, %d)" % (tp, fp))
    print("Pseudo-negative (false-n, true-n) = (%d, %d)" % (fn, tn))
    print("--------------------------------------------")
    pi = float(tp / len(lp))
    pi_p = float(fn / len(ln))
    print(
Example #11
import argparse
import pickle

import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
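# get_dataset and get_model are assumed to come from this project's local modules; their imports are not part of this excerpt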

parser = argparse.ArgumentParser()
parser.add_argument("--weight-path", required=True, type=str)
parser.add_argument("--model-name", required=True, type=str, help=['preact50'])
parser.add_argument("--data-path",default="data")
parser.add_argument("--dataset", default="sim_real",type=str)
parser.add_argument("--size", default=224, type=int)
parser.add_argument("--batch-size",default=16, type=int)
parser.add_argument("--eval-batch-size", default=64, type=int)

args = parser.parse_args()

train_ds, test_ds = get_dataset(args)
model = get_model(args)

if __name__=='__main__':
    model.build((2, 224,224,3)) # Build
    model.load_weights(args.weight_path) # Load
    # Compile
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['acc'])
    # import IPython; IPython.embed();exit(1)
    labels, preds = [], []
    for img,label in test_ds:
        labels += list(label.numpy())
        preds += list(model.predict(img).argmax(1))
    cm = tf.math.confusion_matrix(labels, preds)
    print(cm)
    with open("data/label_to_id_dict.dict", "rb") as f:
Example #12
import pickle
import numpy as np
from utils import get_dataset
import matplotlib.pyplot as plt
from utils import construct

window = 30
with open('HMM_tau_' + str(window), 'rb') as f:
    fitted_model = pickle.load(f)

dataset = get_dataset(window)

p_comps = dataset.pca_results.components_
motifs = fitted_model.means_

all_labels = fitted_model.decode(dataset.all_reduced_segments)[1]
unique_labels, counts = np.unique(all_labels, return_counts=True)

sorted_unique_labels = unique_labels[np.argsort(counts)[::-1]]



""" Plot most common three motifs (judged by marginal prob)"""
motif_fig = plt.figure(figsize=(3, 3))
all_motifs = np.array([])
for i in range(10):
    true_ind = int(sorted_unique_labels[i])
    motif = construct(true_ind, p_comps, motifs)
    plt.plot(motif[1, :], -motif[0, :], label=str(i))
plt.legend()
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
Example #13
def run():
    parser = ArgumentParser()
    parser.add_argument(
        "--dataset_path",
        type=str,
        default="",
        help="Path or url of the dataset. If empty download from S3.")
    parser.add_argument("--dataset_cache",
                        type=str,
                        default='./dataset_cache',
                        help="Path or url of the dataset cache")
    parser.add_argument(
        "--model",
        type=str,
        default="openai-gpt",
        help="Model type (openai-gpt or gpt2)",
        choices=['openai-gpt',
                 'gpt2'])  # anything besides gpt2 will load openai-gpt
    parser.add_argument("--model_checkpoint",
                        type=str,
                        default="",
                        help="Path, url or short name of the model")
    parser.add_argument(
        "--max_history",
        type=int,
        default=2,
        help="Number of previous utterances to keep in history")
    parser.add_argument("--device",
                        type=str,
                        default="cuda" if torch.cuda.is_available() else "cpu",
                        help="Device (cuda or cpu)")

    parser.add_argument("--no_sample",
                        action='store_true',
                        help="Set to use greedy decoding instead of sampling")
    parser.add_argument("--max_length",
                        type=int,
                        default=20,
                        help="Maximum length of the output utterances")
    parser.add_argument("--min_length",
                        type=int,
                        default=1,
                        help="Minimum length of the output utterances")
    parser.add_argument("--seed", type=int, default=0, help="Seed")
    parser.add_argument("--temperature",
                        type=float,
                        default=0.7,
                        help="Sampling softmax temperature")
    parser.add_argument(
        "--top_k",
        type=int,
        default=0,
        help="Filter top-k tokens before sampling (<=0: no filtering)")
    parser.add_argument(
        "--top_p",
        type=float,
        default=0.9,
        help="Nucleus filtering (top-p) before sampling (<=0.0: no filtering)")
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__file__)
    logger.info(pformat(args))

    if args.model_checkpoint == "":
        if args.model == 'gpt2':
            raise ValueError(
                "Interacting with GPT2 requires passing a finetuned model_checkpoint"
            )
        else:
            args.model_checkpoint = download_pretrained_model()

    if args.seed != 0:
        random.seed(args.seed)
        torch.random.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)

    logger.info("Get pretrained model and tokenizer")
    tokenizer_class, model_class = (
        GPT2Tokenizer,
        GPT2LMHeadModel) if args.model == 'gpt2' else (OpenAIGPTTokenizer,
                                                       OpenAIGPTLMHeadModel)
    tokenizer = tokenizer_class.from_pretrained(args.model_checkpoint)
    model = model_class.from_pretrained(args.model_checkpoint)
    model.to(args.device)
    add_special_tokens_(model, tokenizer)

    logger.info("Sample a personality")
    dataset = get_dataset(tokenizer, args.dataset_path, args.dataset_cache)
    personalities = [
        dialog["personality"] for dataset in dataset.values()
        for dialog in dataset
    ]
    personality = random.choice(personalities)
    logger.info("Selected personality: %s",
                tokenizer.decode(chain(*personality)))

    history = []
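    # text-to-speech engine and speech recognizer for the voice interface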
    engine = pyttsx3.init()
    r = sr.Recognizer()
    while True:
        print("Talk:")
        with sr.Microphone() as source:
            audio = r.listen(source)
        raw_text = r.recognize_google(audio)
        print(raw_text)
        #        raw_text = input(">>> ")
        while not raw_text:
            print('Prompt should not be empty!')
            raw_text = input(">>> ")
        history.append(tokenizer.encode(raw_text))
        with torch.no_grad():
            out_ids = sample_sequence(personality, history, tokenizer, model,
                                      args)
        history.append(out_ids)
        history = history[-(2 * args.max_history + 1):]
        out_text = tokenizer.decode(out_ids, skip_special_tokens=True)
        print(out_text)
        engine.say(out_text)
        engine.runAndWait()
Example #14
class run:
    parser = ArgumentParser()
    parser.add_argument(
        "--dataset_path",
        type=str,
        default="",
        help="Path or url of the dataset. If empty download from S3.")
    parser.add_argument("--dataset_cache",
                        type=str,
                        default='./dataset_cache',
                        help="Path or url of the dataset cache")
    parser.add_argument(
        "--model",
        type=str,
        default="openai-gpt",
        help="Model type (openai-gpt or gpt2)",
        choices=['openai-gpt',
                 'gpt2'])  # anything besides gpt2 will load openai-gpt
    parser.add_argument("--model_checkpoint",
                        type=str,
                        default="",
                        help="Path, url or short name of the model")
    parser.add_argument(
        "--max_history",
        type=int,
        default=20,
        help="Number of previous utterances to keep in history")
    parser.add_argument("--device",
                        type=str,
                        default="cuda" if torch.cuda.is_available() else "cpu",
                        help="Device (cuda or cpu)")

    parser.add_argument("--no_sample",
                        action='store_true',
                        help="Set to use greedy decoding instead of sampling")
    parser.add_argument("--max_length",
                        type=int,
                        default=20,
                        help="Maximum length of the output utterances")
    parser.add_argument("--min_length",
                        type=int,
                        default=1,
                        help="Minimum length of the output utterances")
    parser.add_argument("--seed", type=int, default=0, help="Seed")
    parser.add_argument("--temperature",
                        type=float,
                        default=0.7,
                        help="Sampling softmax temperature")
    parser.add_argument(
        "--top_k",
        type=int,
        default=0,
        help="Filter top-k tokens before sampling (<=0: no filtering)")
    parser.add_argument(
        "--top_p",
        type=float,
        default=0.9,
        help="Nucleus filtering (top-p) before sampling (<=0.0: no filtering)")
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__file__)
    logger.info(pformat(args))

    if args.model_checkpoint == "":
        if args.model == 'gpt2':
            raise ValueError(
                "Interacting with GPT2 requires passing a finetuned model_checkpoint"
            )
        else:
            args.model_checkpoint = download_pretrained_model()

    if args.seed != 0:
        random.seed(args.seed)
        torch.random.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)

    logger.info("Get pretrained model and tokenizer")
    tokenizer_class, model_class = (
        GPT2Tokenizer,
        GPT2LMHeadModel) if args.model == 'gpt2' else (OpenAIGPTTokenizer,
                                                       OpenAIGPTLMHeadModel)
    tokenizer = tokenizer_class.from_pretrained(args.model_checkpoint)
    model = model_class.from_pretrained(args.model_checkpoint)
    model.to(args.device)
    add_special_tokens_(model, tokenizer)

    logger.info("Sample a personality")
    dataset = get_dataset(tokenizer, args.dataset_path, args.dataset_cache)
    #personalities = [dialog["personality"] for dataset in dataset.values() for dialog in dataset]
    #logger.info("Selected personality: %s", tokenizer.decode(chain(*personality)))

    history = []

    def process_text(self, raw_text):
        #personality = random.choice(self.personalities)
        personality = [
            'i am a robot.', 'my job is to give or deny permission.',
            'i love my job.', 'josh is my favorite person.',
            'my name is permissioner-bot.', 'i do not have a gender.'
        ]
        personality = [self.tokenizer.encode(line) for line in personality]
        self.history.append(self.tokenizer.encode(raw_text))
        with torch.no_grad():
            out_ids = sample_sequence(personality, self.history,
                                      self.tokenizer, self.model, self.args)
        self.history.append(out_ids)
        self.history = self.history[-(2 * self.args.max_history + 1):]
        out_text = self.tokenizer.decode(out_ids, skip_special_tokens=True)
        return out_text
        inputs=model.inputs,
        outputs=[output6])  # , output5, output4, output3, output2, output])


coco_path = '/media/hachreak/Magrathea/datasets/coco/coco'
model_path = ''
epochs = 100
batch_size = 3
input_shape = (320, 320, 3)
output_shape = (10, 10)

action = 'train'
# action = 'evaluate'

# validation dataset
dataset_val = u.get_dataset(coco_path, 'val')
gen_val = prepare(dataset_val, epochs, batch_size, input_shape, output_shape)

#  fuu = next(gen_val)
#  import ipdb; ipdb.set_trace()

# train dataset
dataset_train = u.get_dataset(coco_path, 'train')
gen_train = prepare(dataset_train, epochs, batch_size, input_shape,
                    output_shape)

callback = ModelSaveBestAvgAcc(filepath="model-{epoch:02d}-{avgacc:.2f}.hdf5",
                               verbose=True,
                               cond=filter_val('fmeasure'))

losses = []
def main(args: argparse.Namespace):
    logger = CompleteLogger(args.log, args.phase)
    print(args)

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    cudnn.benchmark = True

    # Data loading code
    train_transform = utils.get_train_transform(args.train_resizing,
                                                not args.no_hflip,
                                                args.color_jitter)
    val_transform = utils.get_val_transform(args.val_resizing)
    print("train_transform: ", train_transform)
    print("val_transform: ", val_transform)

    train_dataset, val_dataset, num_classes = utils.get_dataset(
        args.data, args.root, train_transform, val_transform, args.sample_rate,
        args.num_samples_per_classes)
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.workers,
                              drop_last=True)
    train_iter = ForeverDataIterator(train_loader)
    val_loader = DataLoader(val_dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.workers)
    print("training dataset size: {} test dataset size: {}".format(
        len(train_dataset), len(val_dataset)))

    # create model
    print("=> using pre-trained model '{}'".format(args.arch))
    backbone = utils.get_model(args.arch, args.pretrained)
    backbone_source = utils.get_model(args.arch, args.pretrained)
    pool_layer = nn.Identity() if args.no_pool else None
    classifier = Classifier(backbone,
                            num_classes,
                            pool_layer=pool_layer,
                            finetune=args.finetune).to(device)
    source_classifier = Classifier(backbone_source,
                                   num_classes=backbone_source.fc.out_features,
                                   head=backbone_source.copy_head(),
                                   pool_layer=pool_layer).to(device)
    for param in source_classifier.parameters():
        param.requires_grad = False
    source_classifier.eval()

    # define optimizer and lr scheduler
    optimizer = SGD(classifier.get_parameters(args.lr),
                    momentum=args.momentum,
                    weight_decay=args.wd,
                    nesterov=True)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                        args.lr_decay_epochs,
                                                        gamma=args.lr_gamma)

    # resume from the best checkpoint
    if args.phase == 'test':
        checkpoint = torch.load(logger.get_checkpoint_path('best'),
                                map_location='cpu')
        classifier.load_state_dict(checkpoint)
        acc1 = utils.validate(val_loader, classifier, args, device)
        print(acc1)
        return

    # create intermediate layer getter
    if args.arch == 'resnet50':
        return_layers = [
            'backbone.layer1.2.conv3', 'backbone.layer2.3.conv3',
            'backbone.layer3.5.conv3', 'backbone.layer4.2.conv3'
        ]
    elif args.arch == 'resnet101':
        return_layers = [
            'backbone.layer1.2.conv3', 'backbone.layer2.3.conv3',
            'backbone.layer3.5.conv3', 'backbone.layer4.2.conv3'
        ]
    else:
        raise NotImplementedError(args.arch)
    source_getter = IntermediateLayerGetter(source_classifier,
                                            return_layers=return_layers)
    target_getter = IntermediateLayerGetter(classifier,
                                            return_layers=return_layers)

    # get regularization
    if args.regularization_type == 'l2_sp':
        backbone_regularization = SPRegularization(source_classifier.backbone,
                                                   classifier.backbone)
    elif args.regularization_type == 'feature_map':
        backbone_regularization = BehavioralRegularization()
    elif args.regularization_type == 'attention_feature_map':
        attention_file = os.path.join(logger.root, args.attention_file)
        if not os.path.exists(attention_file):
            attention = calculate_channel_attention(train_dataset,
                                                    return_layers, num_classes,
                                                    args)
            torch.save(attention, attention_file)
        else:
            print("Loading channel attention from", attention_file)
            attention = torch.load(attention_file)
            attention = [a.to(device) for a in attention]
        backbone_regularization = AttentionBehavioralRegularization(attention)
    else:
        raise NotImplementedError(args.regularization_type)

    head_regularization = L2Regularization(
        nn.ModuleList([classifier.head, classifier.bottleneck]))

    # start training
    best_acc1 = 0.0

    for epoch in range(args.epochs):
        print(lr_scheduler.get_lr())
        # train for one epoch
        train(train_iter, classifier, backbone_regularization,
              head_regularization, target_getter, source_getter, optimizer,
              epoch, args)
        lr_scheduler.step()

        # evaluate on validation set
        acc1 = utils.validate(val_loader, classifier, args, device)

        # remember best acc@1 and save checkpoint
        torch.save(classifier.state_dict(),
                   logger.get_checkpoint_path('latest'))
        if acc1 > best_acc1:
            shutil.copy(logger.get_checkpoint_path('latest'),
                        logger.get_checkpoint_path('best'))
        best_acc1 = max(acc1, best_acc1)

    print("best_acc1 = {:3.1f}".format(best_acc1))
    logger.close()
def main():
    config = utils.parse_args()

    if config['cuda'] and torch.cuda.is_available():
        device = 'cuda:0'
    else:
        device = 'cpu'

    dataset_args = (config['task'], config['dataset'], config['dataset_path'],
                    'train', config['num_layers'], config['self_loop'],
                    config['normalize_adj'], config['transductive'])
    dataset = utils.get_dataset(dataset_args)
    loader = DataLoader(dataset=dataset,
                        batch_size=config['batch_size'],
                        shuffle=True,
                        collate_fn=dataset.collate_wrapper)
    input_dim, output_dim = dataset.get_dims()

    model = models.GAT(input_dim, config['hidden_dims'], output_dim,
                       config['num_heads'], config['dropout'], device)
    model.to(device)

    if not config['load']:
        criterion = utils.get_criterion(config['task'])
        optimizer = optim.Adam(model.parameters(),
                               lr=config['lr'],
                               weight_decay=config['weight_decay'])
        epochs = config['epochs']
        stats_per_batch = config['stats_per_batch']
        num_batches = int(ceil(len(dataset) / config['batch_size']))
        model.train()
        print('--------------------------------')
        print('Training.')
        for epoch in range(epochs):
            print('Epoch {} / {}'.format(epoch + 1, epochs))
            running_loss = 0.0
            num_correct, num_examples = 0, 0
            for (idx, batch) in enumerate(loader):
                features, node_layers, mappings, rows, labels = batch
                features, labels = features.to(device), labels.to(device)
                optimizer.zero_grad()
                out = model(features, node_layers, mappings, rows)
                loss = criterion(out, labels)
                loss.backward()
                optimizer.step()
                with torch.no_grad():
                    running_loss += loss.item()
                    predictions = torch.max(out, dim=1)[1]
                    num_correct += torch.sum(predictions == labels).item()
                    num_examples += len(labels)
                if (idx + 1) % stats_per_batch == 0:
                    running_loss /= stats_per_batch
                    accuracy = num_correct / num_examples
                    print('    Batch {} / {}: loss {}, accuracy {}'.format(
                        idx + 1, num_batches, running_loss, accuracy))
                    running_loss = 0.0
                    num_correct, num_examples = 0, 0
        print('Finished training.')
        print('--------------------------------')

        if config['save']:
            print('--------------------------------')
            directory = os.path.join(os.path.dirname(os.getcwd()),
                                     'trained_models')
            if not os.path.exists(directory):
                os.makedirs(directory)
            fname = utils.get_fname(config)
            path = os.path.join(directory, fname)
            print('Saving model at {}'.format(path))
            torch.save(model.state_dict(), path)
            print('Finished saving model.')
            print('--------------------------------')

    if config['load']:
        directory = os.path.join(os.path.dirname(os.getcwd()),
                                 'trained_models')
        fname = utils.get_fname(config)
        path = os.path.join(directory, fname)
        model.load_state_dict(torch.load(path))
    dataset_args = (config['task'], config['dataset'], config['dataset_path'],
                    'test', config['num_layers'], config['self_loop'],
                    config['normalize_adj'], config['transductive'])
    dataset = utils.get_dataset(dataset_args)
    loader = DataLoader(dataset=dataset,
                        batch_size=config['batch_size'],
                        shuffle=False,
                        collate_fn=dataset.collate_wrapper)
    criterion = utils.get_criterion(config['task'])
    stats_per_batch = config['stats_per_batch']
    num_batches = int(ceil(len(dataset) / config['batch_size']))
    model.eval()
    print('--------------------------------')
    print('Testing.')
    running_loss, total_loss = 0.0, 0.0
    num_correct, num_examples = 0, 0
    total_correct, total_examples = 0, 0
    for (idx, batch) in enumerate(loader):
        features, node_layers, mappings, rows, labels = batch
        features, labels = features.to(device), labels.to(device)
        out = model(features, node_layers, mappings, rows)
        loss = criterion(out, labels)
        running_loss += loss.item()
        total_loss += loss.item()
        predictions = torch.max(out, dim=1)[1]
        num_correct += torch.sum(predictions == labels).item()
        total_correct += torch.sum(predictions == labels).item()
        num_examples += len(labels)
        total_examples += len(labels)
        if (idx + 1) % stats_per_batch == 0:
            running_loss /= stats_per_batch
            accuracy = num_correct / num_examples
            print('    Batch {} / {}: loss {}, accuracy {}'.format(
                idx + 1, num_batches, running_loss, accuracy))
            running_loss = 0.0
            num_correct, num_examples = 0, 0
    total_loss /= num_batches
    total_accuracy = total_correct / total_examples
    print('Loss {}, accuracy {}'.format(total_loss, total_accuracy))
    print('Finished testing.')
    print('--------------------------------')
def train_student():

    #get args
    args = get_args()
    seed = set_seed(args.seed, args.use_cuda)

    trainset, testset, nr_channels, mlp_input_neurons, classes = get_dataset(
        args)

    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=args.num_processes)

    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=1)

    #get student and teacher models
    student_model_class = get_model_class(args.student_model)
    teacher_model_class = get_model_class(args.teacher_model)
    if "MLP" in args.student_model:
        stud_model_simple = student_model_class(mlp_input_neurons, 10,
                                                args.dropout)
        stud_model_teacher = student_model_class(mlp_input_neurons, 10,
                                                 args.dropout)
        teacher_model = teacher_model_class(mlp_input_neurons, 10,
                                            args.dropout)
    else:
        stud_model_simple = student_model_class(nr_channels, 10, args.dropout)
        stud_model_teacher = student_model_class(nr_channels, 10, args.dropout)
        teacher_model = teacher_model_class(nr_channels, 10, args.dropout)

    print("Train student with teacher help")
    loss_epoch2, loss_values2, total_accuracy2 = train_student_teacher(
        stud_model_teacher, teacher_model, args, trainloader, testloader, seed)

    print("Train simple student")
    loss_epoch1, loss_values1, total_accuracy1 = train_student_normal(
        stud_model_simple, args, trainloader, testloader, seed)

    with open(
            "params" + args.dataset + '_' + args.teacher_model + '_' +
            str(seed), "rb") as f:
        _, epoch_eval_teacher, total_accuracy_teacher = pickle.load(f)

    #plot loss and total accuracy
    plt.figure(1)
    plt.plot(range(0, args.nr_epochs), loss_values1)
    plt.plot(range(0, args.nr_epochs), loss_values2)
    plt.legend(['student_simple', 'student_teacher'], loc='upper right')

    plt.xlabel('Nr Epochs')
    plt.ylabel('Loss function value')
    plt.title('Loss function comparison between students')
    plt.savefig('Loss_function_' + args.dataset + '_students' + str(seed) +
                "_" + str(args.id))

    plt.figure(2)
    plt.plot(loss_epoch1, total_accuracy1)
    plt.plot(loss_epoch2, total_accuracy2)
    plt.plot(epoch_eval_teacher, total_accuracy_teacher)
    plt.legend(['student_simple', 'student_teacher', 'teacher'],
               loc='lower right')

    plt.xlabel('Nr Epochs')
    plt.ylabel('Total accuracy')
    plt.title('Accuracy comparison between students')
    plt.savefig('Accuracy_' + args.dataset + '_students' + str(seed) + "_" +
                str(args.id))
def attack_run(model, adversary, hps):
    model.eval()
    dataset = get_dataset(data_name=hps.problem, train=False)
    # hps.n_batch_test = 1
    test_loader = DataLoader(dataset=dataset,
                             batch_size=hps.n_batch_test,
                             shuffle=False)

    test_clnloss = 0
    clncorrect = 0
    test_advloss = 0
    advcorrect = 0

    attack_path = os.path.join(hps.attack_dir, hps.attack)
    if not os.path.exists(attack_path):
        os.mkdir(attack_path)

    for batch_id, (clndata, target) in enumerate(test_loader):
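        # compare clean vs. adversarial predictions batch by batch (the break below stops after the first batch)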
        # Note that images are scaled to [-1.0, 1.0]
        clndata, target = clndata.to(hps.device), target.to(hps.device)
        path = os.path.join(attack_path, 'original_{}.png'.format(batch_id))
        save_image(clndata, path, normalize=True)

        with torch.no_grad():
            output = model(clndata)

        print('original logits ', output.detach().cpu().numpy())
        test_clnloss += F.cross_entropy(output, target, reduction='sum').item()
        pred = output.max(1, keepdim=True)[1]
        #print('pred: ', pred)
        clncorrect += pred.eq(target.view_as(pred)).sum().item()

        advdata = adversary.perturb(clndata, target)
        path = os.path.join(attack_path,
                            '{}perturbed_{}.png'.format(prefix, batch_id))
        save_image(advdata, path, normalize=True)

        with torch.no_grad():
            output = model(advdata)
        print('adv logits ', output.detach().cpu().numpy())

        test_advloss += F.cross_entropy(output, target, reduction='sum').item()
        pred = output.max(1, keepdim=True)[1]
        #print('pred: ', pred)
        advcorrect += pred.eq(target.view_as(pred)).sum().item()

        #if batch_id == 2:
        #    exit(0)
        break

    test_clnloss /= len(test_loader.dataset)
    print('Test set: avg cln loss: {:.4f},'
          ' cln acc: {}/{}'.format(test_clnloss, clncorrect,
                                   len(test_loader.dataset)))

    test_advloss /= len(test_loader.dataset)
    print('Test set: avg adv loss: {:.4f},'
          ' adv acc: {}/{}'.format(test_advloss, advcorrect,
                                   len(test_loader.dataset)))

    cln_acc = clncorrect / len(test_loader.dataset)
    adv_acc = advcorrect / len(test_loader.dataset)
    return cln_acc, adv_acc
Example #20
from glob import glob
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint

import os

from tfrecord_parser import parse_tfrecords
from tfrecord_creator import create_tfrecords
from utils import get_dataset

print('TensorFlow', tf.__version__)

images_path, mask_path, num_classes, dataset_size = get_dataset(
    dataset_path='../mini', folder='training')

batch_size = 1
H, W = 512, 512

tfrecord_dir = os.path.join(os.getcwd(), 'tfrecords')
os.makedirs(tfrecord_dir, exist_ok=True)

checkpoint_dir = os.path.join(os.getcwd(), 'checkpoints')
os.makedirs(checkpoint_dir, exist_ok=True)

# create_tfrecords(images_path, mask_path, tfrecord_dir)

train_tfrecords = os.path.join(tfrecord_dir, 'train*.tfrecord')
input_function = parse_tfrecords(filenames=train_tfrecords,
                                 height=H,
                                 width=W,
                                 batch_size=batch_size)
def attack_run_rejection_policy(model, adversary, hps):
    """
    An attack run with rejection policy.
    :param model: Pytorch model.
    :param adversary: Advertorch adversary.
    :param hps: hyperparameters
    :return:
    """
    model.eval()

    # Get thresholds
    threshold_list1 = []
    threshold_list2 = []
    for label_id in range(hps.n_classes):
        # No data augmentation (crop_flip=False) when computing the in-distribution thresholds
        dataset = get_dataset(data_name=hps.problem,
                              train=True,
                              label_id=label_id,
                              crop_flip=False)
        in_test_loader = DataLoader(dataset=dataset,
                                    batch_size=hps.n_batch_test,
                                    shuffle=False)

        print('Inference on {}, label_id {}'.format(hps.problem, label_id))
        in_ll_list = []
        for batch_id, (x, y) in enumerate(in_test_loader):
            x = x.to(hps.device)
            y = y.to(hps.device)
            ll = model(x)

            correct_idx = ll.argmax(dim=1) == y

            # keep only the samples that are classified correctly
            ll_, y_ = ll[correct_idx], y[correct_idx]
            in_ll_list += list(ll_[:, label_id].detach().cpu().numpy())

        thresh_idx = int(0.01 * len(in_ll_list))
        thresh1 = sorted(in_ll_list)[thresh_idx]
        thresh_idx = int(0.02 * len(in_ll_list))
        thresh2 = sorted(in_ll_list)[thresh_idx]
        threshold_list1.append(thresh1)  # 1st percentile as threshold
        threshold_list2.append(thresh2)  # 2nd percentile as threshold
        print('1st & 2nd percentile thresholds: {:.3f}, {:.3f}'.format(
            thresh1, thresh2))

    # Evaluation
    dataset = get_dataset(data_name=hps.problem, train=False)
    # hps.n_batch_test = 1
    test_loader = DataLoader(dataset=dataset,
                             batch_size=hps.n_batch_test,
                             shuffle=False)

    n_correct = 0  # total number of correct classified samples by clean classifier
    n_successful_adv = 0  # total number of successful adversarial examples generated
    n_rejected_adv1 = 0  # total number of successfully rejected (successful) adversarial examples, <= n_successful_adv
    n_rejected_adv2 = 0  # total number of successfully rejected (successful) adversarial examples, <= n_successful_adv

    attack_path = os.path.join(hps.attack_dir, hps.attack)
    if not os.path.exists(attack_path):
        os.mkdir(attack_path)

    thresholds1 = torch.tensor(threshold_list1).to(hps.device)
    thresholds2 = torch.tensor(threshold_list2).to(hps.device)

    l2_distortion_list = []
    for batch_id, (x, y) in enumerate(test_loader):
        # Note that images are scaled to [0., 1.0]
        x, y = x.to(hps.device), y.to(hps.device)
        with torch.no_grad():
            output = model(x)

        pred = output.argmax(dim=1)
        correct_idx = pred == y
        # only evaluate adversarial robustness on samples the clean classifier got right
        x, y = x[correct_idx], y[correct_idx]
        n_correct += correct_idx.sum().item()

        adv_x = adversary.perturb(x, y)
        with torch.no_grad():
            output = model(adv_x)

        diff = adv_x - x
        l2_distortion = diff.norm(p=2,
                                  dim=-1).mean().item()  # mean l2 distortion

        pred = output.argmax(dim=1)
        successful_idx = pred != y  # idx of successful adversarial examples.
        values, pred = output[successful_idx].max(dim=1)
        # confidence_idx = values >= thresholds[pred]
        reject_idx1 = values < thresholds1[
            pred]  # idx of successfully rejected samples.
        reject_idx2 = values < thresholds2[
            pred]  # idx of successfully rejected samples.

        # adv_correct += pred[confidence_idx].eq(y[confidence_idx]).sum().item()
        n_successful_adv += successful_idx.float().sum().item()
        n_rejected_adv1 += reject_idx1.float().sum().item()
        n_rejected_adv2 += reject_idx2.float().sum().item()

        l2_distortion_list.append(l2_distortion)
        if batch_id % 10 == 0:
            print('Evaluating on {}-th batch ...'.format(batch_id + 1))

    n = len(test_loader.dataset)
    reject_rate1 = n_rejected_adv1 / n_successful_adv
    reject_rate2 = n_rejected_adv2 / n_successful_adv
    success_adv_rate = n_successful_adv / n_correct
    print('Test set, clean classification accuracy: {}/{}={:.4f}'.format(
        n_correct, n, n_correct / n))
    print('success rate of adv examples generation: {}/{}={:.4f}'.format(
        n_successful_adv, n_correct, success_adv_rate))
    print('Mean L2 distortion of Adv Examples: {:.4f}'.format(
        np.mean(l2_distortion_list)))
    print('1st percentile, reject success rate: {}/{}={:.4f}'.format(
        n_rejected_adv1, n_successful_adv, reject_rate1))
    print('2nd percentile, reject success rate: {}/{}={:.4f}'.format(
        n_rejected_adv2, n_successful_adv, reject_rate2))
Example #22
def main(args: argparse.Namespace):
    logger = CompleteLogger(args.log, args.phase)
    print(args)

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    cudnn.benchmark = True

    # Data loading code
    train_transform = utils.get_train_transform(args.train_resizing, random_horizontal_flip=True,
                                                random_color_jitter=False)
    val_transform = utils.get_val_transform(args.val_resizing)
    print("train_transform: ", train_transform)
    print("val_transform: ", val_transform)

    train_source_dataset, train_target_dataset, val_dataset, test_dataset, num_classes, args.class_names = \
        utils.get_dataset(args.data, args.root, args.source, args.target, train_transform, val_transform)
    train_source_loader = DataLoader(train_source_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    train_target_loader = DataLoader(train_target_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)

    train_source_iter = ForeverDataIterator(train_source_loader)
    train_target_iter = ForeverDataIterator(train_target_loader)

    # create model
    print("=> using pre-trained model '{}'".format(args.arch))
    backbone = utils.get_model(args.arch)
    pool_layer = nn.Identity() if args.no_pool else None
    classifier = ImageClassifier(backbone, num_classes, bottleneck_dim=args.bottleneck_dim, pool_layer=pool_layer).to(device)
    domain_discri = DomainDiscriminator(in_feature=classifier.features_dim, hidden_size=1024).to(device)

    # define optimizer and lr scheduler
    optimizer = SGD(classifier.get_parameters() + domain_discri.get_parameters(),
                    args.lr, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True)
    lr_scheduler = LambdaLR(optimizer, lambda x:  args.lr * (1. + args.lr_gamma * float(x)) ** (-args.lr_decay))
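    # LambdaLR multiplies each parameter group's base lr by args.lr * (1 + lr_gamma * step)^(-lr_decay)
    # at every scheduler step, i.e. a polynomially decaying learning rate as in DANN-style training.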

    # define loss function
    domain_adv = DomainAdversarialLoss(domain_discri).to(device)

    # analyze the model
    if args.phase == 'analysis':
        # extract features from both domains
        feature_extractor = nn.Sequential(classifier.backbone, classifier.pool_layer, classifier.bottleneck).to(device)
        source_feature = collect_feature(train_source_loader, feature_extractor, device)
        target_feature = collect_feature(train_target_loader, feature_extractor, device)
        # plot t-SNE
        tSNE_filename = osp.join(logger.visualize_directory, 'TSNE.png')
        tsne.visualize(source_feature, target_feature, tSNE_filename)
        print("Saving t-SNE to", tSNE_filename)
        # calculate A-distance, which is a measure of distribution discrepancy
        A_distance = a_distance.calculate(source_feature, target_feature, device)
        print("A-distance =", A_distance)
        return

    if args.phase == 'test':
        acc1 = validate(test_loader, classifier, args)
        print(acc1)
        return

    # start training
    best_h_score = 0.
    for epoch in range(args.epochs):
        # train for one epoch
        train(train_source_iter, train_target_iter, classifier, domain_adv, optimizer,
              lr_scheduler, epoch, args)

        # evaluate on validation set
        h_score = validate(val_loader, classifier, args)

        # remember the best h-score and save checkpoint
        torch.save(classifier.state_dict(), logger.get_checkpoint_path('latest'))
        if h_score > best_h_score:
            shutil.copy(logger.get_checkpoint_path('latest'), logger.get_checkpoint_path('best'))
        best_h_score = max(h_score, best_h_score)

    print("best_h_score = {:3.1f}".format(best_h_score))

    # evaluate on test set
    classifier.load_state_dict(torch.load(logger.get_checkpoint_path('best')))
    h_score = validate(test_loader, classifier, args)
    print("test_h_score = {:3.1f}".format(h_score))

    logger.close()
Example #23
def main(args: argparse.Namespace):
    logger = CompleteLogger(args.log, args.phase)
    print(args)

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    cudnn.benchmark = True

    # Data loading code
    train_transform = utils.get_train_transform(args.train_resizing, random_horizontal_flip=not args.no_hflip,
                                                random_color_jitter=False, resize_size=args.resize_size,
                                                norm_mean=args.norm_mean, norm_std=args.norm_std)
    val_transform = utils.get_val_transform(args.val_resizing, resize_size=args.resize_size,
                                            norm_mean=args.norm_mean, norm_std=args.norm_std)
    print("train_transform: ", train_transform)
    print("val_transform: ", val_transform)

    train_source_dataset, train_target_dataset, val_dataset, test_dataset, num_classes, args.class_names = \
        utils.get_dataset(args.data, args.root, args.source, args.target, train_transform, val_transform)
    train_source_loader = DataLoader(train_source_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    train_target_loader = DataLoader(train_target_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)

    train_source_iter = ForeverDataIterator(train_source_loader)
    train_target_iter = ForeverDataIterator(train_target_loader)

    # create model
    print("=> using model '{}'".format(args.arch))
    backbone = utils.get_model(args.arch, pretrain=not args.scratch)
    pool_layer = nn.Identity() if args.no_pool else None
    classifier = ImageClassifier(backbone, num_classes, args.num_blocks,
                                 bottleneck_dim=args.bottleneck_dim, dropout_p=args.dropout_p,
                                 pool_layer=pool_layer, finetune=not args.scratch).to(device)
    adaptive_feature_norm = AdaptiveFeatureNorm(args.delta).to(device)
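    # Stepwise Adaptive Feature Norm (AFN) loss: encourages each example's feature norm to grow
    # by `delta` relative to its current (detached) value at every step.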

    # define optimizer
    # the learning rate is fixed, following the original paper
    optimizer = SGD(classifier.get_parameters(), args.lr, weight_decay=args.weight_decay)

    # resume from the best checkpoint
    if args.phase != 'train':
        checkpoint = torch.load(logger.get_checkpoint_path('best'), map_location='cpu')
        classifier.load_state_dict(checkpoint)

    # analyze the model
    if args.phase == 'analysis':
        # extract features from both domains
        feature_extractor = nn.Sequential(classifier.backbone, classifier.pool_layer, classifier.bottleneck).to(device)
        source_feature = collect_feature(train_source_loader, feature_extractor, device)
        target_feature = collect_feature(train_target_loader, feature_extractor, device)
        # plot t-SNE
        tSNE_filename = osp.join(logger.visualize_directory, 'TSNE.pdf')
        tsne.visualize(source_feature, target_feature, tSNE_filename)
        print("Saving t-SNE to", tSNE_filename)
        # calculate A-distance, which is a measure of distribution discrepancy
        A_distance = a_distance.calculate(source_feature, target_feature, device)
        print("A-distance =", A_distance)
        return

    if args.phase == 'test':
        acc1 = utils.validate(test_loader, classifier, args, device)
        print(acc1)
        return

    # start training
    best_acc1 = 0.
    for epoch in range(args.epochs):
        # train for one epoch
        train(train_source_iter, train_target_iter, classifier, adaptive_feature_norm, optimizer, epoch, args)

        # evaluate on validation set
        acc1 = utils.validate(val_loader, classifier, args, device)

        # remember best acc@1 and save checkpoint
        torch.save(classifier.state_dict(), logger.get_checkpoint_path('latest'))
        if acc1 > best_acc1:
            shutil.copy(logger.get_checkpoint_path('latest'), logger.get_checkpoint_path('best'))
        best_acc1 = max(acc1, best_acc1)

    print("best_acc1 = {:3.1f}".format(best_acc1))

    # evaluate on test set
    classifier.load_state_dict(torch.load(logger.get_checkpoint_path('best')))
    acc1 = utils.validate(test_loader, classifier, args, device)
    print("test_acc1 = {:3.1f}".format(acc1))

    logger.close()
Example #24
                                    'indicator_{}'.format(clean_method))
        utils.save_dfs(ind_train, ind_test, ind_path_pfx)


def clean(dataset, error_type=None):
    """ Clean each error in the dataset.

    Args:
        dataset (dict): dataset dict in dataset.py
        error_type (str, optional): if given, only this error type is cleaned
    """
    print("- Clean dataset '{}'".format(dataset['data_dir']))
    for error in dataset['error_types']:
        if error_type is not None and error != error_type:
            continue
        print("    - Clean error type '{}'".format(error))
        clean_error(dataset, error)
    print("    - Finished")


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', default=None)
    args = parser.parse_args()

    # datasets to be cleaned, clean all datasets if not specified
    datasets = [utils.get_dataset(args.dataset)
                ] if args.dataset is not None else config.datasets

    # clean datasets
    for dataset in datasets:
        clean(dataset)
Example #25
import numpy as np
import re
import time
import utils
from bpe import BPE

bpe = BPE()
bpe.load('data/words.bpe')
endToken = bpe.str_to_token['\n']
numTokens = len(bpe.str_to_token)
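# The newline character serves as the end-of-sequence token; numTokens is the size of the learned BPE vocabulary.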

raw_data = utils.get_dataset(low_it=True)

SAVE_NAME = 'data/train_data_bpe_edition.npz'

DATA_TO_PROCESS = int(input('data to process, {} max? '.format(len(raw_data))))
BATCH_SIZE = int(input('batch size? '))

if DATA_TO_PROCESS > len(raw_data):
    DATA_TO_PROCESS = len(raw_data)

vocab = utils.vocab

xi, xp, y = utils.encodeData(raw_data[:DATA_TO_PROCESS], bpe, endToken)

print('about to process {} items, press Enter to start'.format(len(xi)))
input()
print('{:=^40}'.format(' starting '))

start = time.time()
Example #26
def main():

    parser = argparse.ArgumentParser(description='TrainingContainer')
    parser.add_argument('--algorithm-settings',
                        type=str,
                        default="",
                        help="algorithm settings")
    parser.add_argument('--search-space',
                        type=str,
                        default="",
                        help="search space for the neural architecture search")
    parser.add_argument('--num-layers',
                        type=str,
                        default="",
                        help="number of layers of the neural network")

    args = parser.parse_args()

    # Get Algorithm Settings
    algorithm_settings = args.algorithm_settings.replace("\'", "\"")
    algorithm_settings = json.loads(algorithm_settings)
    print(">>> Algorithm settings")
    for key, value in algorithm_settings.items():
        if len(key) > 13:
            print("{}\t{}".format(key, value))
        elif len(key) < 5:
            print("{}\t\t\t{}".format(key, value))
        else:
            print("{}\t\t{}".format(key, value))
    print()

    num_epochs = int(algorithm_settings["num_epochs"])

    w_lr = float(algorithm_settings["w_lr"])
    w_lr_min = float(algorithm_settings["w_lr_min"])
    w_momentum = float(algorithm_settings["w_momentum"])
    w_weight_decay = float(algorithm_settings["w_weight_decay"])
    w_grad_clip = float(algorithm_settings["w_grad_clip"])

    alpha_lr = float(algorithm_settings["alpha_lr"])
    alpha_weight_decay = float(algorithm_settings["alpha_weight_decay"])

    batch_size = int(algorithm_settings["batch_size"])
    num_workers = int(algorithm_settings["num_workers"])

    init_channels = int(algorithm_settings["init_channels"])

    print_step = int(algorithm_settings["print_step"])

    num_nodes = int(algorithm_settings["num_nodes"])
    stem_multiplier = int(algorithm_settings["stem_multiplier"])

    # Get Search Space
    search_space = args.search_space.replace("\'", "\"")
    search_space = json.loads(search_space)
    search_space = SearchSpace(search_space)

    # Get Num Layers
    num_layers = int(args.num_layers)
    print("Number of layers {}\n".format(num_layers))

    # Set GPU Device
    # Currently use only first available GPU
    # TODO: Add multi GPU support
    # TODO: Add functionality to select GPU
    all_gpus = list(range(torch.cuda.device_count()))
    if len(all_gpus) > 0:
        device = torch.device("cuda")
        torch.cuda.set_device(all_gpus[0])
        np.random.seed(2)
        torch.manual_seed(2)
        torch.cuda.manual_seed_all(2)
        torch.backends.cudnn.benchmark = True
        print(">>> Use GPU for Training <<<")
        print("Device ID: {}".format(torch.cuda.current_device()))
        print("Device name: {}".format(torch.cuda.get_device_name(0)))
        print("Device availability: {}\n".format(torch.cuda.is_available()))
    else:
        device = torch.device("cpu")
        print(">>> Use CPU for Training <<<")

    # Get dataset with meta information
    # TODO: Add support for more datasets
    input_channels, num_classes, train_data = utils.get_dataset()

    criterion = nn.CrossEntropyLoss().to(device)

    model = NetworkCNN(init_channels, input_channels, num_classes, num_layers,
                       criterion, search_space, num_nodes, stem_multiplier)

    model = model.to(device)

    # Weights optimizer
    w_optim = torch.optim.SGD(model.getWeights(),
                              w_lr,
                              momentum=w_momentum,
                              weight_decay=w_weight_decay)

    # Alphas optimizer
    alpha_optim = torch.optim.Adam(model.getAlphas(),
                                   alpha_lr,
                                   betas=(0.5, 0.999),
                                   weight_decay=alpha_weight_decay)

    # Split data to train/validation
    num_train = len(train_data)
    split = num_train // 2
    indices = list(range(num_train))

    train_sampler = torch.utils.data.sampler.SubsetRandomSampler(
        indices[:split])
    valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(
        indices[split:])
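    # Bi-level (DARTS-style) split: the first half of the training data updates the network
    # weights, the second half acts as the validation split for the architecture (alpha) updates.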

    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=batch_size,
                                               sampler=train_sampler,
                                               num_workers=num_workers,
                                               pin_memory=True)

    valid_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=batch_size,
                                               sampler=valid_sampler,
                                               num_workers=num_workers,
                                               pin_memory=True)

    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(w_optim,
                                                              num_epochs,
                                                              eta_min=w_lr_min)

    architect = Architect(model, w_momentum, w_weight_decay)
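    # The Architect encapsulates the architecture (alpha) update step of the bi-level optimization.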

    # Start training
    best_top1 = 0.

    for epoch in range(num_epochs):
        lr_scheduler.step()
        lr = lr_scheduler.get_lr()[0]

        model.print_alphas()

        # Training
        print(">>> Training")
        train(train_loader, valid_loader, model, architect, w_optim,
              alpha_optim, lr, epoch, num_epochs, device, w_grad_clip,
              print_step)

        # Validation
        print("\n>>> Validation")
        cur_step = (epoch + 1) * len(train_loader)
        top1 = validate(valid_loader, model, epoch, cur_step, num_epochs,
                        device, print_step)

        # Print genotype
        genotype = model.genotype(search_space)
        print("\nModel genotype = {}".format(genotype))

        # Update the best top-1 accuracy and the corresponding genotype
        if top1 > best_top1:
            best_top1 = top1
            best_genotype = genotype

    print("Final best Prec@1 = {:.4%}".format(best_top1))
    print("\nBest-Genotype={}".format(str(best_genotype).replace(" ", "")))
Example #27
    feature_dim, temperature, tau_plus, k = args.feature_dim, args.temperature, args.tau_plus, args.k
    batch_size, epochs, estimator = args.batch_size, args.epochs, args.estimator
    dataset_name = args.dataset_name
    beta = args.beta
    anneal = args.anneal

    # Configure an adaptive beta when annealing is enabled: beta is lowered linearly to 0 over n_steps stages.
    if anneal == 'down':
        do_beta_anneal = True
        n_steps = 9
        betas = iter(np.linspace(beta, 0, n_steps))
    else:
        do_beta_anneal = False

    # data prepare
    train_data, memory_data, test_data = utils.get_dataset(dataset_name)

    train_loader = DataLoader(train_data,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=12,
                              pin_memory=True,
                              drop_last=True)
    memory_loader = DataLoader(memory_data,
                               batch_size=batch_size,
                               shuffle=False,
                               num_workers=12,
                               pin_memory=True)
    test_loader = DataLoader(test_data,
                             batch_size=batch_size,
                             shuffle=False,
Example #28
def run_training(args):
    logging.set_verbosity(logging.WARNING)

    args = utils.dict_to_namedtuple(args)

    config = hparams_config.get_efficientdet_config(args.model_name)
    config.override(args.hparams, allow_new_keys=True)
    config.image_size = utils.parse_image_size(config.image_size)

    params = dict(
        config.as_dict(),
        seed=args.seed,
        batch_size=args.batch_size,
    )

    logging.info(params)

    if args.ckpt_dir:
        ckpt_dir = args.ckpt_dir
        if not tf.io.gfile.exists(ckpt_dir):
            tf.io.gfile.makedirs(ckpt_dir)
        config_file = os.path.join(ckpt_dir, "config.yaml")
        if not tf.io.gfile.exists(config_file):
            tf.io.gfile.GFile(config_file, "w").write(str(config))

    if params["seed"]:
        seed = params["seed"]
        os.environ["PYTHONHASHSEED"] = str(seed)
        tf.random.set_seed(seed)
        np.random.seed(seed)
        random.seed(seed)
        os.environ["TF_DETERMINISTIC_OPS"] = "1"
        os.environ["TF_CUDNN_DETERMINISTIC"] = "1"

    utils.setup_gpus()

    num_devices = 1
    physical_devices = tf.config.list_physical_devices("GPU")
    multi_gpu = args.multi_gpu
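    # Use MirroredStrategy (synchronous data parallelism) when several GPUs are requested;
    # otherwise fall back to the default single-device strategy.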
    if multi_gpu is not None and len(multi_gpu) != 1 and len(
            physical_devices) > 1:
        devices = [f"GPU:{gpu}"
                   for gpu in multi_gpu] if len(multi_gpu) != 0 else None
        strategy = tf.distribute.MirroredStrategy(devices)
        num_devices = len(devices) if devices else len(physical_devices)
    else:
        strategy = tf.distribute.get_strategy()

    train_dataset = utils.get_dataset(
        args,
        args.batch_size * num_devices,
        True,
        params,
        strategy if num_devices > 1 else None,
    )

    if args.eval_after_training or args.eval_during_training:
        eval_dataset = utils.get_dataset(
            args,
            num_devices,
            False,
            params,
            strategy if num_devices > 1 else None,
        )
        options = tf.data.Options()
        options.experimental_distribute.auto_shard_policy = (
            tf.data.experimental.AutoShardPolicy.DATA)
        eval_dataset = eval_dataset.with_options(options)
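        # AutoShardPolicy.DATA shards the dataset by elements across replicas instead of by input files.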

    with strategy.scope():
        model = efficientdet_net.EfficientDetNet(params=params)

        global_batch_size = args.batch_size * strategy.num_replicas_in_sync
        model.compile(optimizer=optimizers.get_optimizer(
            params, args.epochs, global_batch_size, args.train_steps))

        initial_epoch = args.initial_epoch
        if args.start_weights:
            image_size = params["image_size"]
            model.predict(np.zeros((1, image_size[0], image_size[1], 3)))
            model.load_weights(args.start_weights)
            fname = args.start_weights.split("/")[-1]
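            # If the starting weights follow the '<model_name>.NN.h5' checkpoint naming scheme,
            # recover the epoch number from the filename and resume counting from there.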
            ckpt_pattern = rf"{args.model_name}\.(\d\d+)\.h5"
            match = re.match(ckpt_pattern, fname)
            if match:
                initial_epoch = int(match.group(1).lstrip("0"))

        callbacks = []

        if args.ckpt_dir:
            ckpt_dir = args.ckpt_dir
            if not tf.io.gfile.exists(ckpt_dir):
                tf.io.gfile.makedirs(ckpt_dir)
            callbacks.append(
                tf.keras.callbacks.ModelCheckpoint(
                    filepath=os.path.join(
                        ckpt_dir, "".join([args.model_name,
                                           ".{epoch:02d}.h5"])),
                    save_weights_only=True,
                ))

        if args.log_dir:
            log_dir = args.log_dir
            if not tf.io.gfile.exists(log_dir):
                tf.io.gfile.makedirs(log_dir)
            callbacks.append(
                tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                               update_freq="epoch"))

        model.fit(
            train_dataset,
            epochs=args.epochs,
            steps_per_epoch=args.train_steps,
            initial_epoch=initial_epoch,
            callbacks=callbacks,
            validation_data=eval_dataset
            if args.eval_during_training else None,
            validation_steps=args.eval_steps,
            validation_freq=args.eval_freq,
        )

        if args.eval_after_training:
            print("Evaluation after training:")
            model.evaluate(eval_dataset, steps=args.eval_steps)

        model.save_weights(args.output_filename)
Example #29
def main(args: argparse.Namespace):
    logger = CompleteLogger(args.log, args.phase)
    print(args)

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    cudnn.benchmark = True

    # Data loading code
    train_transform = utils.get_train_transform(args.train_resizing, random_horizontal_flip=not args.no_hflip,
                                                random_color_jitter=False, resize_size=args.resize_size,
                                                norm_mean=args.norm_mean, norm_std=args.norm_std)
    val_transform = utils.get_val_transform(args.val_resizing, resize_size=args.resize_size,
                                            norm_mean=args.norm_mean, norm_std=args.norm_std)
    print("train_transform: ", train_transform)
    print("val_transform: ", val_transform)

    train_source_dataset, train_target_dataset, val_dataset, test_dataset, num_classes, args.class_names = \
        utils.get_dataset(args.data, args.root, args.source, args.target, train_transform, val_transform)
    train_source_loader = DataLoader(train_source_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    train_target_loader = DataLoader(train_target_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)

    train_source_iter = ForeverDataIterator(train_source_loader)
    train_target_iter = ForeverDataIterator(train_target_loader)

    # create model
    print("=> using model '{}'".format(args.arch))
    backbone = utils.get_model(args.arch, pretrain=not args.scratch)
    pool_layer = nn.Identity() if args.no_pool else None
    classifier = ImageClassifier(backbone, num_classes, bottleneck_dim=args.bottleneck_dim,
                                 pool_layer=pool_layer, finetune=not args.scratch).to(device)

    # define loss function
    if args.adversarial:
        thetas = [Theta(dim).to(device) for dim in (classifier.features_dim, num_classes)]
    else:
        thetas = None
    jmmd_loss = JointMultipleKernelMaximumMeanDiscrepancy(
        kernels=(
            [GaussianKernel(alpha=2 ** k) for k in range(-3, 2)],
            (GaussianKernel(sigma=0.92, track_running_stats=False),)
        ),
        linear=args.linear, thetas=thetas
    ).to(device)
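    # JMMD aligns the joint distribution of (bottleneck features, classifier predictions) across
    # domains: a family of Gaussian kernels is applied to the features and a single fixed-sigma
    # kernel to the prediction layer.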

    parameters = classifier.get_parameters()
    if thetas is not None:
        parameters += [{"params": theta.parameters(), 'lr': 0.1} for theta in thetas]

    # define optimizer
    optimizer = SGD(parameters, args.lr, momentum=args.momentum, weight_decay=args.wd, nesterov=True)
    lr_scheduler = LambdaLR(optimizer, lambda x:  args.lr * (1. + args.lr_gamma * float(x)) ** (-args.lr_decay))

    # resume from the best checkpoint
    if args.phase != 'train':
        checkpoint = torch.load(logger.get_checkpoint_path('best'), map_location='cpu')
        classifier.load_state_dict(checkpoint)

    # analyze the model
    if args.phase == 'analysis':
        # extract features from both domains
        feature_extractor = nn.Sequential(classifier.backbone, classifier.pool_layer, classifier.bottleneck).to(device)
        source_feature = collect_feature(train_source_loader, feature_extractor, device)
        target_feature = collect_feature(train_target_loader, feature_extractor, device)
        # plot t-SNE
        tSNE_filename = osp.join(logger.visualize_directory, 'TSNE.pdf')
        tsne.visualize(source_feature, target_feature, tSNE_filename)
        print("Saving t-SNE to", tSNE_filename)
        # calculate A-distance, which is a measure of distribution discrepancy
        A_distance = a_distance.calculate(source_feature, target_feature, device)
        print("A-distance =", A_distance)
        return

    if args.phase == 'test':
        acc1 = utils.validate(test_loader, classifier, args, device)
        print(acc1)
        return

    # start training
    best_acc1 = 0.
    for epoch in range(args.epochs):
        # train for one epoch
        train(train_source_iter, train_target_iter, classifier, jmmd_loss, optimizer,
              lr_scheduler, epoch, args)

        # evaluate on validation set
        acc1 = utils.validate(val_loader, classifier, args, device)

        # remember best acc@1 and save checkpoint
        torch.save(classifier.state_dict(), logger.get_checkpoint_path('latest'))
        if acc1 > best_acc1:
            shutil.copy(logger.get_checkpoint_path('latest'), logger.get_checkpoint_path('best'))
        best_acc1 = max(acc1, best_acc1)

    print("best_acc1 = {:3.1f}".format(best_acc1))

    # evaluate on test set
    classifier.load_state_dict(torch.load(logger.get_checkpoint_path('best')))
    acc1 = utils.validate(test_loader, classifier, args, device)
    print("test_acc1 = {:3.1f}".format(acc1))

    logger.close()
if __name__ == '__main__':
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    args = args_parser()
    exp_details(args)

    if args.gpu is not None:  # compare against None so that GPU id 0 counts as a valid device
        torch.cuda.set_device(args.gpu)
    device = 'cuda' if args.gpu is not None else 'cpu'

    # load dataset and user groups
    train_dataset, test_dataset, user_groups = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural network
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)

    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
def user_annotation():
    """
    Feed tweets to console one at a time, and ask user for sentiment annotation.
    """
    dataset = utils.select_dataset()
    text_tweets = utils.get_dataset(dataset)
    tweets = []
    for text_tweet in text_tweets:
        tweets.append(tweet.to_tweet(text_tweet))
    username = raw_input("Name? ... ")
    
    print "\n--------------\n"
    print "Input: "
    print "\n1: Negative sentiment (Negative opinion). \n2: Neutral/objective sentiment (No opinion). \n3: Positive sentiment (Positive opinion). \n5: Delete the tweet from the dataset. \nx: Cancel sequence. 0: Go back to previous tweet. "
    print "\n--------------\n"
    
    annotated_to = 0
    i = 0
    while i < len(tweets):
#        tweets[i].text.encode('utf8')
#        text = tweets[i].text
#        tweets[i].text = text.decode('utf8')
        try:
            print "Tweet nr. : "+str(i+1)
            print str((i+1.0)/len(tweets)*100)+" % done "
            print unicode(tweets[i].__str__().decode('utf8'))
        except UnicodeEncodeError:
            try:
                print "Tweet nr. : "+str(i+1)
                print str(tweets[i])
            except UnicodeEncodeError:
                print "Could not print tweet number "+str(i+1) +". Deleting tweet..."
                tweets.remove(tweets[i])
                continue
        
        userinput = raw_input("...")
        while not legal_input(userinput):
            userinput = raw_input("Unlawful input! Please re-introduce.")
        if userinput == '1':
            tweets[i].set_sentiment("negative")
        elif userinput == '2':
            tweets[i].set_sentiment("neutral")
        elif userinput == '3':
            tweets[i].set_sentiment("positive")
        elif userinput == '5':
            print "Deleting tweet..."
            tweets.remove(tweets[i])
            continue
        elif userinput == '0':
            i = max(i-1, 0)  # step back to the previous tweet, but not past the start
            continue
        elif userinput == 'x':
            break
        i = i+1
        
        
    #TODO: need to encode to utf when getting from dataset?!?!
    #Store the sentiment in file!
    tweetlines = []
    for t in tweets[:i]:
        if t.get_sentiment() is None:
            continue
        tweetlines.append(t.to_tsv())
    dir = username+"_annotated_data"
    if not os.path.exists(dir):
        os.makedirs(dir)
    utils.store_dataset(tweetlines, dir+dataset[4:])
    
    print "Domo arigato!" 
if __name__ == '__main__':
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../brats2018_logs')

    args = brats2018_args_parser()
    exp_details(args)

    device = torch.device(args.gpu) if args.gpu is not None else 'cpu'
    if args.gpu:
        torch.cuda.set_device(device)

    # load dataset and user groups
    train_dataset, _, user_groups = get_dataset(args)

    # Use the U-Net model
    if args.model == 'unet':
        global_model = UNet(n_channels=1, n_classes=1, bilinear=True)
    else:
        exit('Error: unrecognized model')

    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    print(global_model)

    # copy weights
    global_weights = global_model.state_dict()
Example #33
import matplotlib.pyplot as plt

import torch
from torch.utils.data import DataLoader

from utils import get_dataset
from options import args_parser
from update import test_inference
from models import MLP, CNNMnist, CNNFashion_Mnist, CNNCifar

if __name__ == '__main__':
    args = args_parser()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # load datasets
    train_dataset, test_dataset, _ = get_dataset(args)

    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural networks
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = CNNCifar(args=args)
    elif args.model == 'mlp':
        # Multi-layer perceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
Example #34
# Note: sklearn.cross_validation and sklearn.grid_search were removed in scikit-learn >= 0.20;
# their functionality now lives in sklearn.model_selection.
from sklearn import cross_validation
from sklearn import grid_search
from sklearn.tree import DecisionTreeClassifier  # needed for the classifier constructed below

from plot import plot_boundary
from utils import get_dataset
from utils import get_random_state
from utils import compare

if __name__ == "__main__":

    # (Question 1) dt.py: Decision tree
    SAMPLE_NUMBER = 2000
    TRAIN_SET_SAMPLE_NUM = 150

    X, y = get_dataset(SAMPLE_NUMBER)

    X_train, y_train = X[:TRAIN_SET_SAMPLE_NUM], y[:TRAIN_SET_SAMPLE_NUM]
    X_test, y_test = X[TRAIN_SET_SAMPLE_NUM:], y[TRAIN_SET_SAMPLE_NUM:]
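    # First TRAIN_SET_SAMPLE_NUM samples are used for training, the remaining ones for testing.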

    # 1.
    decisionTreeClassifier = DecisionTreeClassifier(random_state=get_random_state())
    decisionTreeClassifier.fit(X_train, y_train)
    y_dtc = decisionTreeClassifier.predict(X_test)

    # Plot
    plot_boundary("1-1-Ground-Truth", decisionTreeClassifier, X_test, y_test, title="Ground Truth data")
    plot_boundary("1-1-Prediction", decisionTreeClassifier, X_test, y_dtc, title="Prediction data")

    # 2.
    max_depths = [i for i in range(1, 20)]
Example #35
def main():
    parser = argparse.ArgumentParser(description='-----[CNN-classifier]-----')
    parser.add_argument(
        '--mode',
        default='train',
        help='train: train (with test) a model / test: test saved models')
    parser.add_argument(
        '--model',
        default='non-static',
        help='available models: rand, static, non-static, multichannel')
    parser.add_argument('--dataset',
                        default='TREC',
                        help='available datasets: MR, TREC, AG, SST1, SST2')
    parser.add_argument('--save_model',
                        default=False,
                        action='store_true',
                        help='whether saving model or not')
    parser.add_argument('--early_stopping',
                        default=False,
                        action='store_true',
                        help='whether to apply early stopping')
    parser.add_argument('--epoch',
                        default=100,
                        type=int,
                        help='number of max epoch')
    parser.add_argument('--learning_rate',
                        default=1.0,
                        type=float,
                        help='learning rate')
    parser.add_argument('--gpu',
                        default=-1,
                        type=int,
                        help='the number of gpu to be used')
    parser.add_argument('--cv',
                        default=False,
                        action='store_true',
                        help='whether to use cross validation')
    parser.add_argument(
        '--we',
        default='w2v',
        help='available word embedding: w2v, rand, scv, yelpfscv')
    parser.add_argument('--type',
                        default='CNN',
                        help='available type for cnn model: CNN, CNN2')
    parser.add_argument('--batch_size',
                        default=50,
                        type=int,
                        help='batch_size')
    parser.add_argument('--filters',
                        default=[3, 4, 5],
                        type=int,
                        nargs='+',
                        help='filters')
    parser.add_argument('--filter_num',
                        default=[100, 100, 100],
                        type=int,
                        nargs='+',
                        help='filter_num')
    parser.add_argument('--dropout_prob',
                        default=0.5,
                        type=float,
                        help='dropout_prob')
    parser.add_argument('--norm_limit',
                        default=3,
                        type=float,
                        help='norm_limit')
    parser.add_argument('--result_path',
                        default='result/result_auto.csv',
                        help='result_path')

    options = parser.parse_args()

    data = utils.get_dataset(options.dataset)
    # data = utils.get_dataset('MR')
    params = {
        'MODEL': options.model,
        'DATASET': options.dataset,
        'SAVE_MODEL': options.save_model,
        'EARLY_STOPPING': options.early_stopping,
        'EPOCH': options.epoch,
        'LEARNING_RATE': options.learning_rate,
        'MAX_SENT_LEN': max([
            len(sent)
            for sent in data['train_x'] + data['dev_x'] + data['test_x']
        ]),
        'BATCH_SIZE': options.batch_size,
        'WORD_DIM': 300,
        'VOCAB_SIZE': len(data['vocab']),
        'CLASS_SIZE': len(data['classes']),
        'FILTERS': options.filters,
        'FILTER_NUM': options.filter_num,
        'DROPOUT_PROB': options.dropout_prob,
        'NORM_LIMIT': options.norm_limit,
        'GPU': options.gpu,
        'cv': options.cv,
        'we': options.we,
        'type': options.type,
        'result_path': options.result_path
    }

    print('=' * 20 + 'INFORMATION' + '=' * 20)
    print('MODEL:', params['MODEL'])
    print('DATASET:', params['DATASET'])
    print('VOCAB_SIZE:', params['VOCAB_SIZE'])
    print('EPOCH:', params['EPOCH'])
    print('LEARNING_RATE:', params['LEARNING_RATE'])
    print('EARLY_STOPPING:', params['EARLY_STOPPING'])
    print('SAVE_MODEL:', params['SAVE_MODEL'])
    print('WORD EMBEDDING:', params['we'])
    print('MODEL TYPE:', params['type'])
    print('BATCH_SIZE', params['BATCH_SIZE'])
    print('FILTERS', params['FILTERS'])
    print('FILTER_NUM', params['FILTER_NUM'])
    print('DROPOUT_PROB', params['DROPOUT_PROB'])
    print('NORM_LIMIT', params['NORM_LIMIT'])
    print('=' * 20 + 'INFORMATION' + '=' * 20)

    if options.mode == 'train':
        print('=' * 20 + 'TRAINING STARTED' + '=' * 20)
        if params['cv']:
            kf = KFold(n_splits=10, shuffle=True)
            for index, (train_index,
                        test_index) in enumerate(kf.split(data['x'])):
                print('cv {}'.format(index))
                train_index = shuffle(train_index)
                dev_size = len(train_index) // 10
                dev_index = train_index[:dev_size]
                train_index = train_index[dev_size:]
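                # Hold out 10% of each training fold as a development set; train on the rest.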
                data['dev_x'] = list(itemgetter(*dev_index)(data['x']))
                data['train_x'] = list(itemgetter(*train_index)(data['x']))
                data['test_x'] = list(itemgetter(*test_index)(data['x']))
                data['dev_y'] = list(itemgetter(*dev_index)(data['y']))
                data['train_y'] = list(itemgetter(*train_index)(data['y']))
                data['test_y'] = list(itemgetter(*test_index)(data['y']))
                train(data, params)
        else:
            model = train(data, params)
            if params['SAVE_MODEL']:
                utils.save_model(model, params)
        print('=' * 20 + 'TRAINING FINISHED' + '=' * 20)
    else:
        if params['GPU'] == -1:
            model = utils.load_model(params)
        else:
            model = utils.load_model(params).cuda(params['GPU'])

        test_acc = test(data, model, params)
        print('test acc:', test_acc)
def run(args: DictConfig) -> None:
    # cuda_available = torch.cuda.is_available()
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    # device = "cuda" if cuda_available and args.device == 'cuda' else "cpu"

    classifier = eval(args.classifier_name)(args.width,
                                            args.n_classes).to(args.device)
    logger.info('Classifier: {}, width: {}, # parameters: {}'.format(
        args.classifier_name, args.width, cal_parameters(classifier)))

    data_dir = hydra.utils.to_absolute_path(args.data_dir)
    train_data = get_dataset(data_name=args.dataset,
                             data_dir=data_dir,
                             train=True,
                             crop_flip=True)
    test_data = get_dataset(data_name=args.dataset,
                            data_dir=data_dir,
                            train=False,
                            crop_flip=False)

    test_loader = DataLoader(dataset=test_data,
                             batch_size=args.n_batch_test,
                             shuffle=False)

    optimizer = SGD(classifier.parameters(),
                    lr=args.lr_max,
                    momentum=args.momentum,
                    weight_decay=args.weight_decay)

    def run_forward(scheduler):
        optimal_loss = 1e5
        for epoch in range(1, args.n_epochs + 1):
            loss, acc = train_epoch(classifier,
                                    train_loader,
                                    args,
                                    optimizer,
                                    scheduler=scheduler)
            if loss < optimal_loss:
                optimal_loss = loss
                torch.save(classifier.state_dict(), checkpoint)
            logger.info(
                'Epoch {}, lr: {:.4f}, loss: {:.4f}, acc: {:.4f}'.format(
                    epoch,
                    scheduler.get_lr()[0], loss, acc))

    if args.adv_generation:
        checkpoint = '{}_w{}_at_fast.pth'.format(args.classifier_name,
                                                 args.width)
        train_loader = DataLoader(dataset=train_data,
                                  batch_size=args.n_batch_train,
                                  shuffle=True)
        lr_steps = args.n_epochs * len(train_loader)
        scheduler = lr_scheduler.CyclicLR(optimizer,
                                          base_lr=args.lr_min,
                                          max_lr=args.lr_max,
                                          step_size_up=lr_steps / 2,
                                          step_size_down=lr_steps / 2)
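        # One triangular cycle over the entire run: the lr ramps from lr_min to lr_max during the
        # first half of all steps and back down during the second half, as in 'fast' adversarial training.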

        run_forward(scheduler)

        clean_loss, clean_acc = eval_epoch(classifier,
                                           test_loader,
                                           args,
                                           adversarial=False)
        adv_loss, adv_acc = eval_epoch(classifier,
                                       test_loader,
                                       args,
                                       adversarial=True,
                                       save=True)
        logger.info('Clean loss: {:.4f}, acc: {:.4f}'.format(
            clean_loss, clean_acc))
        logger.info('Adversarial loss: {:.4f}, acc: {:.4f}'.format(
            adv_loss, adv_acc))

    else:
        n = len(train_data)
        split_size = n // args.n_split
        lengths = [split_size] * (args.n_split - 1) + [
            n % split_size + split_size
        ]
        datasets_list = random_split(train_data, lengths=lengths)
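        # Partition the training set into n_split disjoint subsets; the last subset absorbs the remainder.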

        for split_id, dataset in enumerate(datasets_list):
            checkpoint = '{}_w{}_split{}_at_fast.pth'.format(
                args.classifier_name, args.width, split_id)
            logger.info('Running on subset {}, size: {}'.format(
                split_id + 1, len(dataset)))
            train_loader = DataLoader(dataset=dataset,
                                      batch_size=args.n_batch_train,
                                      shuffle=True)

            lr_steps = args.n_epochs * len(train_loader)
            scheduler = lr_scheduler.CyclicLR(optimizer,
                                              base_lr=args.lr_min,
                                              max_lr=args.lr_max,
                                              step_size_up=lr_steps / 2,
                                              step_size_down=lr_steps / 2)

            run_forward(scheduler)

            clean_loss, clean_acc = eval_epoch(classifier,
                                               test_loader,
                                               args,
                                               adversarial=False)
            adv_loss, adv_acc = eval_epoch(classifier,
                                           test_loader,
                                           args,
                                           adversarial=True)
            logger.info('Clean loss: {:.4f}, acc: {:.4f}'.format(
                clean_loss, clean_acc))
            logger.info('Adversarial loss: {:.4f}, acc: {:.4f}'.format(
                adv_loss, adv_acc))
Example #37
def main(args):

    #network = importlib.import_module(args.model_def)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Write arguments to a text file
    utils.write_arguments_to_file(args, os.path.join(log_dir, 'arguments.txt'))

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    utils.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)

    train_set = utils.get_dataset(args.data_dir)
    nrof_classes = len(train_set)
    print('nrof_classes: ', nrof_classes)
    image_list, label_list = utils.get_image_paths_and_labels(train_set)
    image_list = np.array(image_list)
    print('total images: {}'.format(len(image_list)))
    label_list = np.array(label_list, dtype=np.int32)

    dataset_size = len(image_list)
    data_reader = DataGenerator(image_list, label_list, args.batch_size)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    if args.pretrained_model:
        print('Pre-trained model: %s' %
              os.path.expanduser(args.pretrained_model))

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False, name='global_step')

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')
        images_placeholder = tf.placeholder(tf.float32, [None, 112, 96, 3],
                                            name='images_placeholder')
        labels_placeholder = tf.placeholder(tf.int32, [None],
                                            name='labels_placeholder')

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
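        # Staircase exponential decay: the lr is multiplied by learning_rate_decay_factor every
        # learning_rate_decay_epochs * epoch_size global steps.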
        tf.summary.scalar('learning_rate', learning_rate)

        print('Using optimizer: {}'.format(args.optimizer))
        if args.optimizer == 'ADAGRAD':
            opt = tf.train.AdagradOptimizer(learning_rate)
        elif args.optimizer == 'MOM':
            opt = tf.train.MomentumOptimizer(learning_rate, 0.9)

        if args.network == 'sphere_network':
            prelogits = network.infer(images_placeholder)
        else:
            raise Exception('Not supported network: {}'.format(args.network))

        if args.loss_type == 'softmax':
            cross_entropy_mean = utils.softmax_loss(prelogits,
                                                    labels_placeholder,
                                                    len(train_set),
                                                    args.weight_decay, False)
            regularization_losses = tf.get_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES)
            #loss = cross_entropy_mean + args.weight_decay*tf.add_n(regularization_losses)
            loss = cross_entropy_mean + args.weight_decay * tf.add_n(
                regularization_losses)
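            # Total objective: softmax cross-entropy plus weight_decay times the sum of the
            # collected regularization losses.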
            #loss = cross_entropy_mean
        else:
            raise Exception('Not supported loss type: {}'.format(
                args.loss_type))

        #loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')
        losses = {}
        losses['total_loss'] = loss
        losses['softmax_loss'] = cross_entropy_mean
        debug_info = {}
        debug_info['prelogits'] = prelogits

        grads = opt.compute_gradients(loss, tf.trainable_variables())
        train_op = opt.apply_gradients(grads, global_step=global_step)

        #save_vars = [var for var in tf.global_variables() if 'Adagrad' not in var.name and 'global_step' not in var.name]
        save_vars = tf.global_variables()

        #saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
        saver = tf.train.Saver(save_vars, max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                allow_soft_placement=True))

        # Initialize variables
        sess.run(tf.global_variables_initializer(),
                 feed_dict={phase_train_placeholder: True})
        sess.run(tf.local_variables_initializer(),
                 feed_dict={phase_train_placeholder: True})

        with sess.as_default():
            #pdb.set_trace()

            if args.pretrained_model:
                print('Restoring pretrained model: %s' % args.pretrained_model)
                saver.restore(sess, os.path.expanduser(args.pretrained_model))

            # Training and validation loop
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size

                # Train for one epoch
                train(args, sess, epoch, images_placeholder,
                      labels_placeholder, data_reader, debug_info,
                      learning_rate_placeholder, global_step, losses, train_op,
                      args.learning_rate_schedule_file)

                # Save variables and the metagraph if it doesn't exist already
                model_dir = args.models_base_dir
                checkpoint_path = os.path.join(model_dir,
                                               'model-%s.ckpt' % 'softmax')
                saver.save(sess,
                           checkpoint_path,
                           global_step=step,
                           write_meta_graph=False)

                # Evaluate on LFW
    return model_dir