Beispiel #1
0
def load_dataset(dataset_name, split=0):
    """Load one split of *dataset_name* and return its arrays.

    Returns a list [X, Y, Xs, Ys, Y_mean, Y_std]: train inputs/targets,
    test inputs/targets, and the target normalization statistics.
    """
    loader = DatasetLoader(dataset_name, '../../datasets/')
    data = loader.get_data(split=split)
    keys = ('X', 'Y', 'Xs', 'Ys', 'Y_mean', 'Y_std')
    return [data[key] for key in keys]
    def train(self,
              images,
              labels,
              output_models,
              name_model,
              clazzes,
              validation_split,
              size_image,
              dimension,
              batch_size=8,
              epochs=20):
        """Train the network on *images*/*labels*, checkpointing to disk.

        Args:
            images, labels: raw training data; reshaped via DatasetLoader.
            output_models: directory where weight checkpoints are written.
            name_model: model name (not used inside this method).
            clazzes: class labels; len(clazzes) sets the output layer size.
            validation_split: fraction of the data held out for validation.
            size_image: (height, width) of the input images.
            dimension: number of image channels.
            batch_size, epochs: standard fit parameters.
        """
        input_shape = (size_image[0], size_image[1], dimension)

        dataset = DatasetLoader()
        images, labels = dataset.reshape_base(images, labels, input_shape,
                                              len(clazzes))

        self.build_network(input_shape, len(clazzes))

        # NOTE(review): the checkpoint monitors 'loss' while the filename
        # template formats val_loss -- confirm the mismatch is intentional.
        filepath = path.join(output_models,
                             "weights.{epoch:02d}-{val_loss:.2f}.hdf5")
        checkpoint = ModelCheckpoint(filepath,
                                     monitor='loss',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='min')
        earlystopping = EarlyStopping(monitor='loss',
                                      min_delta=0.01,
                                      patience=6,
                                      verbose=0,
                                      mode='auto')

        callbacks_list = [checkpoint, earlystopping]

        # Clean checkpoint files in a background thread while training runs.
        t = threading.Thread(target=self.clear_files, args=(output_models, ))
        t.start()

        self.model.fit(
            images,
            labels,
            batch_size=batch_size,
            callbacks=callbacks_list,
            epochs=epochs,
            verbose=1,
            validation_split=validation_split)

        # Signal the cleanup thread to finish, then wait for it.
        self.thread_flag = 1
        print("\nThread Status: ", t.is_alive())
        # Fix: t.join() blocks without pegging a CPU core; the original spun
        # in a `while t.is_alive(): pass` busy-wait.
        t.join()
        print("\nThread Status: ", t.is_alive())
Beispiel #3
0
def make_adj_list(distance_metric='distance'):
    """Build (or load from a pickle cache) an adjacency list.

    Args:
        distance_metric: 'distance', 'time-distance' or 'network-distance'.
            For 'network-distance' every edge weight is 1 (one hop).

    Returns:
        dict mapping node id -> {neighbour id: weight}, or None when the
        metric is invalid.
    """
    if distance_metric not in ['distance', 'time-distance', 'network-distance']:
        print("Distance metric not valid, please choose 'distance','time-distance' or 'network-distance'. ")
        return None

    vertices_dataset_loader = DatasetLoader('coordinates')

    if distance_metric in ['distance', 'time-distance']:
        edges_dataset_loader = DatasetLoader(distance_metric)
    else:
        # The actual distance will be ignored for network-distance, so any
        # edge file works here.
        edges_dataset_loader = DatasetLoader('distance')

    vertices_df = vertices_dataset_loader.dataset  # the vertices dataset
    edges_df = edges_dataset_loader.dataset  # the edges dataset

    file_path = './adj_list_%s.pkl' % distance_metric

    if os.path.isfile(file_path):
        # Cached on disk: no need to rebuild from scratch.
        # Fix: use a context manager -- the original never closed the file.
        with open(file_path, 'rb') as f:
            return pickle.load(f)

    # Important!
    # Since there might be some disconnected vertices, seed every vertex from
    # the vertices dataset so they all appear in the adjacency list.
    adj_list = {i: {} for i in vertices_df.index}

    print_progress = True  # set this flag to True to print progress percentage

    dataset_size = len(edges_df)
    for index, edge in edges_df.iterrows():
        if index % 10000 == 0 and print_progress:
            print('%4f' % (index / dataset_size * 100))

        if distance_metric == 'network-distance':
            # Weight 1 represents a single hop from point to point.
            node1, node2, dist = edge['node-id-1'], edge['node-id-2'], 1
        else:
            node1, node2, dist = edge['node-id-1'], edge['node-id-2'], edge['distance']

        # Fix: membership test on the dict directly instead of .keys().
        if node1 in adj_list:
            adj_list[node1][node2] = dist

    # Write the freshly built list to the cache file (fix: context manager
    # instead of manual open/close).
    with open(file_path, 'wb') as f:
        pickle.dump(adj_list, f)

    return adj_list
Beispiel #4
0
 def __init__(self,
              qa_system_type,
              internal_representation_type,
              min_similarity=0,
              search_for_body_parts=True,
              pruning=True,
              filter_unuseful_words_from_tokens=True,
              number_glove_words=COM.DEFAULT_NUM_IMPORTED_GLOVE_VECTORS,
              dimension_glove_vectors=COM.POSSIBLE_GLOVE_DIMENSIONS[COM.DEFAULT_GLOVE_DIMENSION]):
   """Wire up the QA pipeline: QA system, POS tagger, tokenizer, answer
   interpreter, vectorifier, symptom tree and dataset loader.

   Raises:
     Exception: if qa_system_type is not in COM.QA_SYSTEM_TYPES, or if
       min_similarity lies outside [0, 1].
   """
   # initialize QA system
   if qa_system_type in COM.QA_SYSTEM_TYPES:
     self.__qas = QASystem(qa_system_type)
   else:
     # NOTE(review): if COM.QA_SYSTEM_TYPES is a list, concatenating it to a
     # str raises TypeError before this Exception is built -- confirm.
     raise Exception("qa_system argument should be an element in this list :" + COM.QA_SYSTEM_TYPES)

   self.__search_for_body_parts = search_for_body_parts

   self.__pruning = pruning

   self.__filter_unuseful_words = filter_unuseful_words_from_tokens

   # initialize SentencePOSTagger
   self.__spt = SentencePOSTagger()

   # initialize Tokenizer
   self.__tokenizer = Tokenizer()

   # initialize answer interpreter (shares the POS tagger)
   self.__ai = AnswersInterpreter(spt=self.__spt)

   # initialize vectorifier
   self.__vect = Vectorifier(internal_representation_type,
                             number_glove_words,
                             dimension_glove_vectors,
                             self.__spt)

   # initialize symptom tree
   self.__st = SymptomTree(self.__vect)

   # initialize dataset loader
   self.__dl = DatasetLoader()

   # Validate and normalize min_similarity: "0.0" collapses to the int 0.
   if 0 <= min_similarity <= 1:
     if str(min_similarity) == "0.0":
       self.__min_similarity = 0
     else:
       self.__min_similarity = min_similarity
   else:
     raise Exception("min_similarity must be a number >= 0 and <= 1")
Beispiel #5
0
def vis_1(adj_list, start, neighbours, distance_metric='distance'):
    """Plot *start* plus its *neighbours* as a graph laid out by coordinates.

    Edges come from adj_list restricted to the displayed nodes; edge colour
    and node shape encode the distance metric. Shows the plot via pyplot.
    """
    coordinates = DatasetLoader('coordinates').get_dataset()

    # Initialize the graph and collect node ids / positions.
    cnod = []
    clat = []
    clon = []
    graph = nx.Graph()
    graph.add_nodes_from([start])
    graph.add_nodes_from(neighbours)

    all_nodes = [start] + neighbours

    for node_id in all_nodes:
        graph.add_edges_from([(node_id, key, {
            'distance': value
        }) for key, value in adj_list[node_id].items() if key in all_nodes])

        # Look the node up once instead of filtering three times.
        row = coordinates[coordinates['node-id'] == node_id]
        cnod.append(row['node-id'].values[0])
        clat.append(row['latitude'].values[0])
        clon.append(row['longitude'].values[0])

    # Choose shapes and colors for nodes/edges per metric.
    if distance_metric == 'distance':
        color_edge = 'red'
        shape_node = 'o'
    elif distance_metric in ('time-distance', 'time_distance'):
        # Fix: the rest of the code base spells this metric 'time-distance';
        # the original matched only 'time_distance', so the purple/diamond
        # styling was unreachable. Both spellings are now accepted.
        color_edge = 'purple'
        shape_node = 'D'
    else:
        color_edge = 'blue'
        shape_node = '^'

    # Smaller labels once the neighbourhood gets crowded.
    size_font = 10 if len(neighbours) <= 10 else 9

    # Visualize the map, highlighting the input node in yellow.
    pos = {cnod[i]: [clat[i], clon[i]] for i in range(len(all_nodes))}

    color_map = ['yellow'] + (['lightgreen'] * (len(all_nodes) - 1))
    nx.draw(graph,
            pos,
            node_color=color_map,
            node_size=150,
            node_shape=shape_node,
            edge_color=color_edge,
            with_labels=True,
            font_size=size_font)

    plt.show()
Beispiel #6
0
def evaluate(args,model,processor):
    """Run one evaluation pass over the dev split.

    Returns:
        (result, class_info): result is {'eval_loss': ..., 'eval_<metric>': ...};
        class_info is the per-entity-class breakdown from SeqEntityScore.
    """
    eval_dataset = load_and_cache_examples(args,processor, data_type='dev')
    eval_dataloader = DatasetLoader(data=eval_dataset, batch_size=args.batch_size,
                                 shuffle=False, seed=args.seed, sort=False,
                                 vocab=processor.vocab, label2id=args.label2id)
    pbar = ProgressBar(n_total=len(eval_dataloader), desc="Evaluating")
    metric = SeqEntityScore(args.id2label,markup=args.markup)
    eval_loss = AverageMeter()
    model.eval()  # switch off dropout/batch-norm updates
    with torch.no_grad():  # no gradients needed during evaluation
        for step, batch in enumerate(eval_dataloader):
            input_ids, input_mask, input_tags, input_lens = batch
            input_ids = input_ids.to(args.device)
            input_mask = input_mask.to(args.device)
            input_tags = input_tags.to(args.device)
            features, loss = model.forward_loss(input_ids, input_mask, input_lens, input_tags)
            # Weight the running loss average by the batch size.
            eval_loss.update(val=loss.item(), n=input_ids.size(0))
            # Decode the CRF features into predicted label paths.
            tags, _ = model.crf._obtain_labels(features, args.id2label, input_lens)
            input_tags = input_tags.cpu().numpy()
            # Trim padding: keep only the first len_ gold tags per sequence.
            target = [input_[:len_] for input_, len_ in zip(input_tags, input_lens)]
            metric.update(pred_paths=tags, label_paths=target)
            pbar(step=step)
    print(" ")
    eval_info, class_info = metric.result()
    # Prefix metric keys with 'eval_' so they can be merged with train logs.
    eval_info = {f'eval_{key}': value for key, value in eval_info.items()}
    result = {'eval_loss': eval_loss.avg}
    result = dict(result, **eval_info)
    return result, class_info
Beispiel #7
0
    def run(self):
        """Perform the full predictive-modeling pipeline on the Pima dataset.

        Stages: load -> explore -> preprocess -> evaluate -> select model ->
        tune/ensemble -> finalize, save and predict.
        """
        # Path to the data set.
        path = "../../neeraj/resource/pima-indians-diabetes.data"
        # Column names of the data set.
        column_names = [ ' preg ' , ' plas ' , ' pres ' , ' skin ' , ' test ' , ' mass ' , ' pedi ' , ' age ' , ' class ' ]
        # Loading the data set using class DatasetLoader.
        load_data = DatasetLoader(path, column_names)
        data = load_data.load()
        load_data.print_shape(data)

        # Understanding data using class DataExplorer.
        explore_data = DataExplorer()
        explore_data.print_data_statistics(data)
        explore_data.visualize(data)

        # Performing data preprocessing.
        process_data = DataPreprocessor()
        input_set, output_set = process_data.split_dataset(data,0,8,8)
        process_data.display_dataset()
        process_data.summarize(input_set, 0, 5, 3)

        # Model evaluation using class Evaluator.
        evaluator = Evaluator()
        evaluator.validate(LogisticRegression(), input_set, output_set, 10, 7)
        evaluator.evaluate(LogisticRegression(), input_set, output_set, 10, 7,'log_loss')

        # Selecting the best model using class ModelSelector.
        model = ModelSelector()
        # A set of candidate models for selection.
        models = []
        models.append(( ' LR ' , LogisticRegression()))
        models.append(( ' LDA ' , LinearDiscriminantAnalysis()))
        models.append(( ' RF ' , RandomForestClassifier(n_estimators=100, max_features=3)))
        selected_model = model.select_model(models, input_set, output_set, 10, 7)
        # Fix: the original applied % to print()'s return value (None),
        # raising TypeError; format the string before printing instead.
        print("\nSelected Model:\n %s" % (selected_model))

        # Improving accuracy using class AccuracyImprover.
        improve_accuracy = AccuracyImprover()
        improve_accuracy.tuning(Ridge(),input_set, output_set)
        improve_accuracy.ensemble_prediction(RandomForestClassifier(n_estimators=100, max_features=3), input_set, output_set, 10, 7)

        # Finalizing the model and performing prediction.
        finalize_model = ModelFinalizer()
        input_train, input_test, output_train, output_test = finalize_model.split_train_test_sets(input_set, output_set, 0.33, 7)
        finalize_model.finalize_and_save(LogisticRegression(), "../../neeraj/resource/pima_model.sav", input_train, output_train)
        finalize_model.predict("../../neeraj/resource/pima_model.sav", input_test, output_test)
Beispiel #8
0
class EmotionRecognition:
    """Wrapper around a VGG-style network for emotion recognition.

    Collaborators DatasetLoader/NetworkBuilder and the constants RUN_NAME,
    SIZE_FACE, SAVE_DIRECTORY, SAVE_MODEL_FILENAME are defined elsewhere
    in the project.
    """

    def __init__(self):
        self.dataset = DatasetLoader()
        self.networkbuilder = NetworkBuilder()

    def build_network(self):
        """Build the VGG model via NetworkBuilder."""
        self.model = self.networkbuilder.build_vgg()
        # self.load_model()

    def load_saved_dataset(self):
        """Load a previously saved dataset from disk into self.dataset."""
        self.dataset.load_from_save()
        print('[+] Dataset found and loaded')

    def start_training(self):
        """Load the dataset, build the network and fit for 100 epochs."""
        self.load_saved_dataset()
        self.build_network()
        # NOTE(review): self.dataset is assigned in __init__ and never set to
        # None, so this guard looks dead -- confirm before removing.
        if self.dataset is None:
            self.load_saved_dataset()
        print('[+] Training network')
        self.model.fit(self.dataset.images,
                       self.dataset.labels,
                       validation_set=(self.dataset.images_test,
                                       self.dataset.labels_test),
                       n_epoch=100,
                       batch_size=100,
                       shuffle=True,
                       show_metric=True,
                       snapshot_step=200,
                       snapshot_epoch=True,
                       run_id=RUN_NAME)

    def predict(self, image):
        """Return the model's prediction for *image* (None passes through)."""
        if image is None:
            return None
        # Reshape to the network's 4-D input: (batch, SIZE_FACE, SIZE_FACE, 1).
        image = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
        return self.model.predict(image)

    def save_model(self):
        """Persist the model weights to SAVE_DIRECTORY/SAVE_MODEL_FILENAME."""
        self.model.save(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
        print('[+] Model trained and saved at ' + SAVE_MODEL_FILENAME)

    def load_model(self):
        """Restore the model weights from SAVE_DIRECTORY/SAVE_MODEL_FILENAME."""
        self.model.load(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
        print('[+] Model loaded from ' + SAVE_MODEL_FILENAME)
Beispiel #9
0
def train_model(mode: str, train: str, dev: str, test: str, model_path: str,
                schemas: list, tech: str):
    """Train an SVC on schema or free-text features and report test metrics.

    Args:
        mode: 'schema' uses preprocess(...)[0]; anything else uses [1].
        train, dev, test: paths handed to DatasetLoader.
        model_path: where the fitted model is saved with joblib.
        schemas: schema list forwarded to preprocess().
        tech: 'bow' selects bag-of-words features; anything else tf-idf.
    """
    print(f'using {tech} to create features')
    print('loading data')
    dataloader = DatasetLoader(train, dev, test)
    vocab = get_vocab(dataloader.train_data, 2000)
    print(f'vocab size {len(vocab)}')

    classifier = SVC()
    print(mode)
    print('creating training vectors')

    # Fix: the four original copy-pasted branches differed only in which
    # preprocess() element is used (schema vs free text) and which
    # featurizer is applied -- select both once instead.
    part = 0 if mode == 'schema' else 1
    featurize = bow if tech == 'bow' else tfidf
    train_vectors = featurize(preprocess(dataloader.train_data, schemas)[part], vocab)
    test_vectors = featurize(preprocess(dataloader.test_data, schemas)[part], vocab)

    train_labels = get_labels(dataloader.train_data)

    print('training the model')
    print(f'{train_vectors.shape}')
    model = classifier.fit(train_vectors, train_labels)

    print('creating testing vectors and testing model')
    test_pred = model.predict(test_vectors)
    test_true = get_labels(dataloader.test_data)
    print(classification_report(test_true, test_pred))
    print(accuracy_score(test_true, test_pred))
    cm = matrix(test_true, test_pred)
    print(f'confusion matrix \n {cm} \n')

    print('saving the model')
    joblib.dump(model, model_path)
Beispiel #10
0
def train(args,model,processor):
    """Train the tagger for args.epochs epochs, evaluating after each one.

    Saves the best checkpoint (by eval_f1) to args.output_dir/'best-model.bin'
    and logs per-entity scores whenever the best F1 improves.
    """
    train_dataset = load_and_cache_examples(args, processor, data_type='train')
    train_loader = DatasetLoader(data=train_dataset, batch_size=args.batch_size,
                                 shuffle=False, seed=args.seed, sort=True,
                                 vocab = processor.vocab,label2id = args.label2id)
    # Only optimize parameters that require gradients.
    parameters = [p for p in model.parameters() if p.requires_grad]
    optimizer = optim.Adam(parameters, lr=args.learning_rate)
    # NOTE(review): verbose/epsilon are not torch.optim.lr_scheduler arguments
    # and epoch_step() is called below, so this is presumably a project-local
    # ReduceLROnPlateau wrapper -- confirm.
    scheduler = ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=3,
                                  verbose=1, epsilon=1e-4, cooldown=0, min_lr=0, eps=1e-8)
    best_f1 = 0
    for epoch in range(1, 1 + args.epochs):
        print(f"Epoch {epoch}/{args.epochs}")
        pbar = ProgressBar(n_total=len(train_loader), desc='Training')
        train_loss = AverageMeter()
        model.train()
        assert model.training
        for step, batch in enumerate(train_loader):
            input_ids, input_mask, input_tags, input_lens = batch
            input_ids = input_ids.to(args.device)
            input_mask = input_mask.to(args.device)
            input_tags = input_tags.to(args.device)
            features, loss = model.forward_loss(input_ids, input_mask, input_lens, input_tags)
            loss.backward()
            # Clip gradients before stepping to stabilize training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_norm)
            optimizer.step()
            optimizer.zero_grad()
            pbar(step=step, info={'loss': loss.item()})
            train_loss.update(loss.item(), n=1)
        print(" ")
        train_log = {'loss': train_loss.avg}
        # Release cached GPU memory before the evaluation pass.
        if 'cuda' in str(args.device):
            torch.cuda.empty_cache()
        eval_log, class_info = evaluate(args,model,processor)
        logs = dict(train_log, **eval_log)
        show_info = f'\nEpoch: {epoch} - ' + "-".join([f' {key}: {value:.4f} ' for key, value in logs.items()])
        logger.info(show_info)
        # Step the plateau scheduler on the epoch-level eval F1.
        scheduler.epoch_step(logs['eval_f1'], epoch)
        if logs['eval_f1'] > best_f1:
            logger.info(f"\nEpoch {epoch}: eval_f1 improved from {best_f1} to {logs['eval_f1']}")
            logger.info("save model to disk.")
            best_f1 = logs['eval_f1']
            # Unwrap DataParallel so the checkpoint loads on a single device.
            if isinstance(model, nn.DataParallel):
                model_stat_dict = model.module.state_dict()
            else:
                model_stat_dict = model.state_dict()
            state = {'epoch': epoch, 'arch': args.arch, 'state_dict': model_stat_dict}
            model_path = args.output_dir / 'best-model.bin'
            torch.save(state, str(model_path))
            print("Eval Entity Score: ")
            for key, value in class_info.items():
                info = f"Subject: {key} - Acc: {value['acc']} - Recall: {value['recall']} - F1: {value['f1']}"
                logger.info(info)
Beispiel #11
0
def main():
    """Train the CNN and no-input baseline on GeoGuessr data and compare them."""
    loader = DatasetLoader("../geoguessrBotDatasets/geoguessrWorld/",
                           (48, 64), "coordinates")
    train_dataset = loader.load_dataset(0, 9000, "train_dataset")
    val_dataset = loader.load_dataset(9000, 10000, "validation_dataset")

    models = {
        "CNN": CNNModel(),
        "noinput": NoInputModel(),
    }

    # Train each model and record its loss curves, keyed by model name.
    training_loss_histories = {}
    validation_loss_histories = {}
    for name, net in models.items():
        net.train(train_dataset, val_dataset, 100)
        training_loss_histories[name] = net.training_loss_history
        validation_loss_histories[name] = net.validation_loss_history

    # Predict on one validation batch with every trained model.
    images_to_predict, labels_to_predict = next(iter(val_dataset))
    predict_images(models, images_to_predict, labels_to_predict)
Beispiel #12
0
    def __init__(self, initial_weight_path, weight_save_path_tmpl, initial_t=0):
        """Set up the parameter-server state and load the initial weights.

        Args:
            initial_weight_path: path to the packed initial weights blob.
            weight_save_path_tmpl: template for saving weight snapshots.
            initial_t: starting iteration counter.
        """
        self.clients = []
        self.t = initial_t
        self.weight_save_path_tmpl = weight_save_path_tmpl
        self.dataset_loader = DatasetLoader.init_by_setting(dist_params)
        self.varserver_db = varserverapi.open_db()

        # Variable ids used for a specific iteration.
        self.reserved_vars_by_t = defaultdict(list)
        # Fix: both files were opened without ever being closed; use context
        # managers so the handles are released deterministically.
        with open(dist_params.WEIGHT_PACK_PARAM_FILE) as f:
            packer = WeightPack(json.load(f))
        with open(initial_weight_path, "rb") as f:
            initial_weights = packer.unpack(f.read())
        self.optimizer = Optimizer(initial_weights, packer, lr=dist_params.LR)
        self.weight_id = None
        self.gradient_ids = []
        self.n_gradient_gathered = 0
Beispiel #13
0
    def __init__(self,
                 initial_weight_path,
                 weight_save_path_tmpl,
                 initial_t=0):
        """Set up server state and read the initial weight blob into memory.

        Args:
            initial_weight_path: path to the initial weights binary.
            weight_save_path_tmpl: template for saving weight snapshots.
            initial_t: starting iteration counter.
        """
        self.clients = []
        self.t = initial_t
        self.weight_save_path_tmpl = weight_save_path_tmpl
        self.dataset_loader = DatasetLoader.init_by_setting(dist_params)
        self.varserver_db = varserverapi.open_db()

        # Variable ids used for a specific iteration.
        self.reserved_vars_by_t = defaultdict(list)
        # Fix: read the weights with a context manager; the original left
        # the file handle open.
        with open(initial_weight_path, "rb") as f:
            self.weights_bin = f.read()
        self.weight_id = None
        self.gradient_ids = []
Beispiel #14
0
def find_shortest_visiting_path(adj_list, path):
    """Find a short route that visits every node of *path* in order.

    Returns (total_distance, edges) where edges is a list of (node, node)
    pairs with degenerate self-edges removed.
    """
    coordinates_df = DatasetLoader('coordinates').dataset

    start = path[0]
    end = path[-1]
    intermediate = path[1:len(path) - 1]

    total_distance = 0
    total_path = []

    if not intermediate:
        # No destinations in between: a single run from A to B suffices.
        total_distance, total_path = dijkstra_h(adj_list, start, end)
    else:
        # The recursive helper needs every node, not only the inner ones;
        # the assumptions in the explanation hold true.
        estimated = get_short_between_path(path, start, end, coordinates_df)

        # Turn the estimated ordering into an actual route, leg by leg,
        # using dijkstra (with heuristics).
        for a, b in zip(estimated, estimated[1:]):
            leg_dist, leg_path = dijkstra_h(adj_list, a, b)
            total_distance += leg_dist
            total_path.extend(leg_path)

    # Pair consecutive nodes, dropping linked duplicates such as n1 -> n1.
    total_path_no_linked_dup = [
        (a, b) for a, b in zip(total_path, total_path[1:]) if a != b
    ]

    return total_distance, total_path_no_linked_dup
Beispiel #15
0
def best_tree(nodes, adj_list):
    """Greedily grow a tree connecting all *nodes*.

    Starts from an initial partial solution (find_best_initial), then
    repeatedly attaches the cheapest remaining node, using straight-line
    distance as a cheap bound before running dijkstra.
    Returns (edge_set, total_distance).
    """
    coordinates_df = DatasetLoader('coordinates').dataset
    res = find_best_initial(nodes, adj_list, coordinates_df)
    visited_nodes = res[1]
    distance = res[0]
    edge_set = res[2]

    must_visited = [
        x for x in nodes if x not in visited_nodes
    ]  # create must_visited nodes from the ones we didn't visit in initial phase
    #dijkstra_calls = 0

    while must_visited:
        min_dist = float('inf')
        for i in must_visited:
            for j in visited_nodes:
                dist = distance_nodes(i, j, coordinates_df)
                #dijkstra_calls += 1
                #dist, seq = dijkstra_h(adj_list, i , j)
                if dist < min_dist:  # we only calculate dijkstra if euclidean distance is less than min distance
                    #dijkstra_calls += 1
                    dist, seq = dijkstra_h(adj_list, i, j)
                    min_dist = dist
                    best_seq = seq

        # NOTE(review): if no candidate beats min_dist in an iteration,
        # best_seq is reused from the previous pass (or NameError on the
        # first) -- confirm this cannot happen with real data.
        for i in range(len(best_seq) - 1):
            edge_set.append(tuple([best_seq[i], best_seq[i + 1]]))

        distance += min_dist
        must_visited = list(
            set(must_visited) - set(best_seq)
        )  #we remove the nodes algo visited during dijkstra from must_visited
        visited_nodes = list(set(visited_nodes).union(
            set(best_seq)))  # and add those nodes to visited_nodes set

    #print("Dijkstra calls: ", dijkstra_calls)

    return (edge_set, distance)
Beispiel #16
0
def evaluate(args,model,processor):
    """Evaluate on the dev split and dump per-token predictions to a file.

    Writes 'bilstm+crf.result.txt' (one "char gold pred" line per token)
    and returns (metrics dict, per-entity-class info).
    """
    eval_dataset = load_and_cache_examples(args,processor, data_type='dev')
    eval_dataloader = DatasetLoader(data=eval_dataset, batch_size=args.batch_size,
                                 shuffle=False, seed=args.seed, sort=False,
                                 vocab=processor.vocab, label2id=args.label2id)
    pbar = ProgressBar(n_total=len(eval_dataloader), desc="Evaluating")
    metric = SeqEntityScore(args.id2label,markup=args.markup)
    eval_loss = AverageMeter()
    model.eval()  # switch off dropout/batch-norm updates
    fout = open(args.output_dir / 'bilstm+crf.result.txt','w')
    with torch.no_grad():
        for step, batch in enumerate(eval_dataloader):
            input_chars, input_ids, input_mask, input_tags, input_lens = batch
            input_ids = input_ids.to(args.device)
            input_mask = input_mask.to(args.device)
            input_tags = input_tags.to(args.device)
            features, loss = model.forward_loss(input_ids, input_mask, input_lens, input_tags)
            eval_loss.update(val=loss.item(), n=input_ids.size(0))
            # Decode the CRF features into predicted label paths.
            tags, _ = model.crf._obtain_labels(features, args.id2label, input_lens)
            input_tags = input_tags.cpu().numpy()
            # Trim padding: keep only the first len_ gold tags per sequence.
            target = [input_[:len_] for input_, len_ in zip(input_tags, input_lens)]
            # As the [0] indexing below shows, this output only works with batch_size == 1.
            assert(len(tags[0])==len(input_tags[0]))
            for i in range(len(tags[0])):
                fout.write(input_chars[i] + ' ' + args.id2label[input_tags[0][i]] + ' ' + tags[0][i] + '\n')
                print(input_chars[i], tags[0][i], args.id2label[input_tags[0][i]])
                # print(processor.vocab.to_word(input_chars[0][i]), tags[0][i], args.id2label[input_tags[0][i]])
            fout.write("\n")
            metric.update(pred_paths=tags, label_paths=target)
            pbar(step=step)
    print(" ")
    fout.close()
    eval_info, class_info = metric.result()
    # Prefix metric keys with 'eval_' so they can be merged with train logs.
    eval_info = {f'eval_{key}': value for key, value in eval_info.items()}
    result = {'eval_loss': eval_loss.avg}
    result = dict(result, **eval_info)
    return result, class_info
Beispiel #17
0
class EmotionRecognition:
    """Emotion recognition with a small AlexNet-style tflearn CNN.

    The constants EMOTIONS, SIZE_FACE, SAVE_DIRECTORY and
    SAVE_MODEL_FILENAME are defined elsewhere in the project.
    """

    def __init__(self):
        self.dataset = DatasetLoader()

    def build_network(self):
        """Build the CNN and wrap it in a tflearn DNN; loads weights if any."""
        # Smaller 'AlexNet'
        # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
        print('[+] Building CNN')
        self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
        self.network = conv_2d(self.network, 64, 5, activation='relu')
        #self.network = local_response_normalization(self.network)
        self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = conv_2d(self.network, 64, 5, activation='relu')
        self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = conv_2d(self.network, 128, 4, activation='relu')
        self.network = dropout(self.network, 0.3)
        self.network = fully_connected(self.network, 3072, activation='relu')
        # One softmax output per emotion class.
        self.network = fully_connected(self.network,
                                       len(EMOTIONS),
                                       activation='softmax')
        self.network = regression(self.network,
                                  optimizer='momentum',
                                  loss='categorical_crossentropy')
        self.model = tflearn.DNN(self.network,
                                 checkpoint_path=SAVE_DIRECTORY +
                                 '/emotion_recognition',
                                 max_checkpoints=1,
                                 tensorboard_verbose=2)
        self.load_model()

    def load_saved_dataset(self):
        """Load a previously saved dataset from disk into self.dataset."""
        self.dataset.load_from_save()
        print('[+] Dataset found and loaded')

    def start_training(self):
        """Load the dataset, build the network and fit for 100 epochs."""
        self.load_saved_dataset()
        self.build_network()
        # NOTE(review): self.dataset is assigned in __init__ and never set to
        # None, so this guard looks dead -- confirm before removing.
        if self.dataset is None:
            self.load_saved_dataset()
        # Training
        print('[+] Training network')
        self.model.fit(self.dataset.images,
                       self.dataset.labels,
                       validation_set=(self.dataset.images_test,
                                       self.dataset.labels_test),
                       n_epoch=100,
                       batch_size=50,
                       shuffle=True,
                       show_metric=True,
                       snapshot_step=200,
                       snapshot_epoch=True,
                       run_id='emotion_recognition')

    def predict(self, image):
        """Return the model's prediction for *image* (None passes through)."""
        if image is None:
            return None
        # Reshape to the network's 4-D input: (batch, SIZE_FACE, SIZE_FACE, 1).
        image = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
        return self.model.predict(image)

    def save_model(self):
        """Persist the model weights to SAVE_DIRECTORY/SAVE_MODEL_FILENAME."""
        self.model.save(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
        print('[+] Model trained and saved at ' + SAVE_MODEL_FILENAME)

    def load_model(self):
        """Restore saved weights if a checkpoint file exists; otherwise no-op."""
        if isfile(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME)):
            self.model.load(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
            print('[+] Model loaded from ' + SAVE_MODEL_FILENAME)
Beispiel #18
0
 def __init__(self):
   """Create the dataset loader used by this recognizer."""
   self.dataset = DatasetLoader()
   # Fix: removed leftover debug print ("aqui1").
Beispiel #19
0
def main():
    """CLI entry point: train/evaluate the EMNST CNN and write predictions.

    Flags control dataset splitting, epochs, batch size, whether to
    warm-start from a saved model, and which split ('valid' or test) the
    final predictions are produced for.
    """
    warnings.filterwarnings("ignore")

    parser = argparse.ArgumentParser(description="excute inference.py")
    parser.add_argument(
        '--is_generated',
        type=str2bool,
        help='Whether Generating Splited Dataset using train / test.csv')
    parser.add_argument('--n_folds',
                        type=int,
                        help='Number of Split folds on train dataset')
    parser.add_argument('--output',
                        type=output_type,
                        help='Dataset what getting prediction')
    parser.add_argument('--epochs', type=int, help='EPOCH size for DL')
    parser.add_argument('--batch_size', type=int, help='BATCH size for DL')
    parser.add_argument('--use_pretrained',
                        type=str2bool,
                        help="using pretrained model for training")
    parser.add_argument('--predict_file',
                        type=str,
                        help='filename for final prediction')

    args = parser.parse_args()

    is_generated = args.is_generated
    n_folds = args.n_folds
    output = str(args.output)
    EPOCHS = args.epochs
    BATCH_SIZE = args.batch_size
    USE_PRETRAINED = args.use_pretrained
    RESULT_FILENAME = args.predict_file

    loader = DatasetLoader()

    # Regenerate the fold split unless the caller says it already exists.
    if not is_generated:
        loader.split(n_folds)

    trn_dict, val_dict, test_dict = loader.load_split(n_folds)

    # Augmentation only on the training split.
    train_transforms = transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomRotation(30),
        transforms.RandomHorizontalFlip(),
        transforms.Grayscale(num_output_channels=1),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5])
    ])

    valid_transforms = transforms.Compose([
        transforms.ToPILImage(),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5])
    ])

    train_dataset = EMNST_Dataset(img_dict=trn_dict,
                                  img_width=28,
                                  img_height=28,
                                  transform=train_transforms)
    valid_dataset = EMNST_Dataset(img_dict=val_dict,
                                  img_width=28,
                                  img_height=28,
                                  transform=valid_transforms)
    test_dataset = EMNST_Test_Dataset(img_dict=test_dict,
                                      img_width=28,
                                      img_height=28,
                                      transform=valid_transforms)

    train_loader = DataLoader(train_dataset,
                              batch_size=BATCH_SIZE,
                              shuffle=True)
    valid_loader = DataLoader(valid_dataset,
                              batch_size=BATCH_SIZE,
                              shuffle=False)
    test_loader = DataLoader(test_dataset,
                             batch_size=BATCH_SIZE,
                             shuffle=False)

    DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Available Device is {DEVICE}")

    model = CustomCNN()
    criterion = nn.CrossEntropyLoss()
    learning_rate = 0.001
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    evaluator = Evaluator(criterion, optimizer)

    model_path = os.path.join(loader.get_basepath(), 'output', 'models')

    if not os.path.isdir(model_path):
        os.mkdir(model_path)

    model_state_file = os.path.join(model_path, 'EMNST_CNN.pt')
    model_val_loss_file = os.path.join(model_path, 'EMNST_CNN_SCORE.txt')

    # Fix: always initialize best_loss/best_loss_init. The original left
    # both unbound when --use_pretrained was set but no saved model file
    # existed, crashing with NameError in the epoch loop below.
    best_loss_init = np.inf
    if USE_PRETRAINED and os.path.isfile(model_state_file):
        model.load_state_dict(torch.load(model_state_file))
        with open(model_val_loss_file, 'r') as f:
            best_loss_init = float(f.read())
    best_loss = best_loss_init

    for epoch in range(EPOCHS):
        train_loss, valid_loss, train_acc, valid_acc = evaluator.train(
            model, train_loader, valid_loader)
        # Checkpoint whenever validation loss improves.
        if valid_loss < best_loss:
            print("valid_loss broken from {:.4f} to {:.4f}".format(
                best_loss, valid_loss))
            torch.save(model.state_dict(), model_state_file)
            best_loss = valid_loss
        print(
            "epoch: {}/{} | trn loss: {:.4f} | val loss: {:.4f} | trn acc: {:.4f} | val acc: {:.4f}"
            .format(epoch + 1, EPOCHS, train_loss, valid_loss, train_acc,
                    valid_acc))

    # Reload the best checkpoint; persist the new best score if we improved.
    if best_loss < best_loss_init:
        model.load_state_dict(torch.load(model_state_file))
        with open(model_val_loss_file, 'w') as f:
            f.write(str(best_loss.item()))

    else:
        model.load_state_dict(torch.load(model_state_file))

    if output == "valid":
        result_dict = evaluator.predict(model=model,
                                        target_loader=valid_loader,
                                        output_type=output,
                                        val_dict=val_dict)
    else:
        result_dict = evaluator.predict(model=model, target_loader=test_loader)

    loader.submit(result_dict, RESULT_FILENAME, output_type=output)
Beispiel #20
0
from dataset_loader import DatasetLoader
from func_1 import get_neighbourhood, vis_1
from func_2 import best_tree
from func_3 import shortest_ordered_path
from func_4 import find_shortest_visiting_path
from visualize import print_itinerary

# Ask the user which functionality to run and which distance metric to use.
func = int(input("Choose your function: (Enter an integer between 1 to 4) "))
metric = input(
    "Choose a distance function : (distance/time-distance/network-distance) ")

print('Loading the datasets..')
# NOTE(review): make_adj_list is not among the visible imports — presumably
# re-exported by one of the func_* modules; confirm before shipping.
adj_list = make_adj_list(
    distance_metric=metric)  # Let's load the data with the chosen metric
coordinates = DatasetLoader(
    'coordinates').dataset  # This doesn't vary, regardless of the metric
print('Done.\n\n')

# Identity comparison is the idiomatic (and safe) way to test for None.
if adj_list is not None:
    if func == 1:
        ''' Showing FUNCTIONALITY 1 '''
        ''' 
            Some additional notes: 
    
                - When using the network distance, pay attention to the maximum distance
                for neighbours to be considered. An high number will result in a recursion
                with too many levels. 
    
                - When using time-distance, if the results seem wrong (no neighbours for example)
                try with a higher number (2000 and up)
        '''
    def __init__(self):
        """Prepare the GPU, the dataset loader and the network input shape."""
        # MANDATORY FOR JETSON
        # Must run before any other GPU work is issued on Jetson boards.
        self.prevent_gpu_sync_failed()

        self.dataset = DatasetLoader()
        # SIZE_FACE x SIZE_FACE, single (grayscale) channel.
        self.input_shape = [SIZE_FACE, SIZE_FACE, 1]
 def __init__(self):
   """Create the recognizer with a fresh DatasetLoader instance."""
   self.dataset = DatasetLoader()
class MoodRecognition:
  """AlexNet-style tflearn CNN for mood recognition on SIZE_FACE x SIZE_FACE
  grayscale face crops, with save/load of model checkpoints."""

  def __init__(self):
    # Dataset contents are populated later by load_saved_dataset().
    self.dataset = DatasetLoader()

  def build_network(self):
    """Build the AlexNet graph, wrap it in a DNN and try to restore weights."""
    # Building 'AlexNet'
    # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
    # https://github.com/DT42/squeezenet_demo
    # https://github.com/yhenon/pysqueezenet/blob/master/squeezenet.py
    print('[+] Building CNN')
    self.network = input_data(shape = [None, SIZE_FACE, SIZE_FACE, 1])
    self.network = conv_2d(self.network, 96, 11, strides = 4, activation = 'relu')
    self.network = max_pool_2d(self.network, 3, strides = 2)
    self.network = local_response_normalization(self.network)
    self.network = conv_2d(self.network, 256, 5, activation = 'relu')
    self.network = max_pool_2d(self.network, 3, strides = 2)
    self.network = local_response_normalization(self.network)
    self.network = conv_2d(self.network, 256, 3, activation = 'relu')
    self.network = max_pool_2d(self.network, 3, strides = 2)
    self.network = local_response_normalization(self.network)
    self.network = fully_connected(self.network, 1024, activation = 'tanh')
    self.network = dropout(self.network, 0.5)
    self.network = fully_connected(self.network, 1024, activation = 'tanh')
    self.network = dropout(self.network, 0.5)
    self.network = fully_connected(self.network, len(EMOTIONS), activation = 'softmax')
    self.network = regression(self.network,
      optimizer = 'momentum',
      loss = 'categorical_crossentropy')
    self.model = tflearn.DNN(
      self.network,
      checkpoint_path = SAVE_DIRECTORY + '/alexnet_mood_recognition',
      max_checkpoints = 1,
      tensorboard_verbose = 2
    )
    self.load_model()

  def load_saved_dataset(self):
    """Populate self.dataset from its saved snapshot."""
    self.dataset.load_from_save()
    print('[+] Dataset found and loaded')

  def start_training(self):
    """Load the dataset, build the network and fit the model."""
    self.load_saved_dataset()
    self.build_network()
    # (The original re-checked `if self.dataset is None` and loaded a second
    # time; self.dataset is always a DatasetLoader instance, so that branch
    # could never run and has been dropped.)
    print('[+] Training network')
    self.model.fit(
      self.dataset.images, self.dataset.labels,
      validation_set = (self.dataset.images_test, self.dataset.labels_test),
      n_epoch = 100,
      batch_size = 50,
      shuffle = True,
      show_metric = True,
      snapshot_step = 200,
      snapshot_epoch = True,
      run_id = 'alexnet_mood_recognition'
    )

  def predict(self, image):
    """Return class probabilities for one face image; None-safe."""
    if image is None:
      return None
    image = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
    return self.model.predict(image)

  def save_model(self):
    """Persist the trained weights under SAVE_DIRECTORY."""
    self.model.save(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
    print('[+] Model trained and saved at ' + SAVE_MODEL_FILENAME)

  def load_model(self):
    """Restore weights if a saved model file exists (no-op otherwise)."""
    if isfile(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME)):
      self.model.load(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
      print('[+] Model loaded from ' + SAVE_MODEL_FILENAME)
Beispiel #24
0
def print_percent(dataset, current, total):
    """Render an in-place progress line `<dataset>: NN%` on stdout.

    ``current`` is the 1-based step just completed out of ``total``.
    """
    sys.stdout.write("\033[K")  # ANSI: clear from cursor to end of line
    # BUG FIX: the original computed `int(i*100.0/total)` using the free
    # name `i` (a NameError outside the caller's loop); use the parameter.
    percent = int(current * 100.0 / total)
    sys.stdout.write("\r" + dataset + ': ' + str(percent) + '%')
    sys.stdout.flush()


def save_results(dataset, accuracies):
    """Write accuracy statistics for *dataset* to ./results/<dataset>.json.

    The JSON payload contains the raw accuracies, their mean, standard
    deviation and count.
    """
    data = {
        'accuracies': accuracies,
        'average': np.mean(accuracies),
        'standard deviation': np.std(accuracies),
        'number of executions': len(accuracies)
    }
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...)` guard.
    os.makedirs('./results/', exist_ok=True)
    with open('./results/' + dataset + '.json', 'w') as file:
        json.dump(data, file, sort_keys=True, indent=4)

# Run the evolutive-KNN benchmark once per dataset and persist the results.
for dataset in datasets():
    loader = DatasetLoader('./datasets/' + dataset)
    best_accuracies = []
    # Python 3 equivalents of the original `xrange` / `print ''` (Py2-isms).
    for i in range(NUMBER_OF_EXECUTIONS):
        print_percent(dataset, i + 1, NUMBER_OF_EXECUTIONS)
        classifier = EvolutiveKNN(loader.examples, loader.labels)
        classifier.train()
        best_accuracies.append(classifier.global_best.fitness)
    print('')  # finish the in-place progress line
    save_results(dataset, best_accuracies)
class EmotionRecognition:
    """Smaller-AlexNet tflearn CNN for facial emotion recognition
    ('turing' experiment: 140 epochs, batch size 50)."""

    def __init__(self):
        # Contents populated later by load_saved_dataset().
        self.dataset = DatasetLoader()

    def build_network(self):
        """Build the CNN graph, wrap it in a DNN and restore saved weights."""
        # Smaller 'AlexNet'
        # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
        print('[+] Building CNN')
        self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, GRAY])
        self.network = conv_2d(self.network, 64, 5, activation='relu')
        self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = conv_2d(self.network, 64, 5, activation='relu')
        self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = conv_2d(self.network, 128, 4, activation='relu')
        self.network = dropout(self.network, 0.3)
        self.network = fully_connected(self.network,
                                       3072,
                                       activation='relu',
                                       name='relu-fully-connected')
        self.network = fully_connected(self.network,
                                       len(EMOTIONS),
                                       activation='softmax',
                                       name='softmax-fully-connected')
        self.network = regression(
            self.network,
            optimizer='momentum',
            name='regression',
            loss='categorical_crossentropy')
        self.model = tflearn.DNN(self.network,
                                 checkpoint_path='model/turing_60epo_50batch',
                                 max_checkpoints=1,
                                 tensorboard_dir="logs/",
                                 tensorboard_verbose=3)
        self.load_model()

    def load_saved_dataset(self):
        """Populate self.dataset from its saved snapshot."""
        self.dataset.load_from_save()
        print('[+] Dataset found and loaded')

    def start_training(self):
        """Load the dataset, build the network and fit the model."""
        self.load_saved_dataset()
        self.build_network()
        # (The original re-checked `if self.dataset is None` here; it is
        # always a DatasetLoader instance, so that branch never ran.)
        print('[+] Training network')
        print("[+] Size test 1: " + str(len(self.dataset.images_test)))
        print("[+] Size label 1: " + str(len(self.dataset.labels_test)))
        self.model.fit(
            self.dataset.images,
            self.dataset.labels,
            # NOTE(review): `_labels_test` (underscored) vs `labels_test`
            # above — presumably DatasetLoader exposes both; confirm before
            # unifying the attribute name.
            validation_set=(self.dataset.images_test,
                            self.dataset._labels_test),
            n_epoch=20,
            batch_size=50,
            shuffle=True,
            show_metric=True,
            snapshot_step=200,
            snapshot_epoch=True,
            run_id='turing_140epo_50batch')

    def predict(self, image):
        """Return emotion probabilities for one face image; None-safe."""
        if image is None:
            return None
        image = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
        return self.model.predict(image)

    def save_model(self):
        """Persist the trained weights."""
        self.model.save("model/turing_140epo_50batch")
        print('[+] Model trained and saved at model/turing_140epo_50batch')

    def load_model(self):
        """Restore weights from the 140-epoch checkpoint."""
        self.model.load("model/turing_140epo_50batch")
        # BUG FIX: the message claimed 'turing_120epo_50batch' was loaded
        # while the code loads 'turing_140epo_50batch'.
        print('[+] Model loaded from model/turing_140epo_50batch\n')
class EmotionRecognition:
  """GoogLeNet / Inception-V1-style tflearn CNN for facial emotion
  recognition on SIZE_FACE x SIZE_FACE inputs with COLOR channels."""

  def __init__(self):
    # Contents populated later by load_saved_dataset().
    self.dataset = DatasetLoader()

  def build_network(self):
    """Build the Inception graph, wrap it in a DNN (weights not auto-loaded)."""
    # Smaller 'Googlenet'
    # https://github.com/tflearn/tflearn/blob/master/examples/images/googlenet.py
    print('[+] Building Inception V3')
    print ('[-] COLOR: ' + str(COLOR))
    print('[-] BATH_SIZE' + str(BATH_SIZE_CONSTANT))
    print('[-] EXPERIMENTAL_LABEL' + EXPERIMENTO_LABEL)

    self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, COLOR])

    self.conv1_7_7 = conv_2d(self.network, 64, 7, strides=2, activation='relu', name='conv1_7_7_s2')
    self.pool1_3_3 = max_pool_2d(self.conv1_7_7, 3, strides=2)
    self.pool1_3_3 = local_response_normalization(self.pool1_3_3)
    self.conv2_3_3_reduce = conv_2d(self.pool1_3_3, 64, 1, activation='relu', name='conv2_3_3_reduce')
    self.conv2_3_3 = conv_2d(self.conv2_3_3_reduce, 192, 3, activation='relu', name='conv2_3_3')
    self.conv2_3_3 = local_response_normalization(self.conv2_3_3)
    self.pool2_3_3 = max_pool_2d(self.conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2')

    # 3a
    self.inception_3a_1_1 = conv_2d(self.pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1')
    self.inception_3a_3_3_reduce = conv_2d(self.pool2_3_3, 96, 1, activation='relu', name='inception_3a_3_3_reduce')
    self.inception_3a_3_3 = conv_2d(self.inception_3a_3_3_reduce, 128, filter_size=3,  activation='relu', name='inception_3a_3_3')
    self.inception_3a_5_5_reduce = conv_2d(self.pool2_3_3, 16, filter_size=1, activation='relu', name='inception_3a_5_5_reduce')
    self.inception_3a_5_5 = conv_2d(self.inception_3a_5_5_reduce, 32, filter_size=5, activation='relu', name='inception_3a_5_5')
    self.inception_3a_pool = max_pool_2d(self.pool2_3_3, kernel_size=3, strides=1, name='inception_3a_pool')
    self.inception_3a_pool_1_1 = conv_2d(self.inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1')
    self.inception_3a_output = merge([self.inception_3a_1_1, self.inception_3a_3_3, self.inception_3a_5_5, self.inception_3a_pool_1_1], mode='concat', axis=3)

    # 3b
    self.inception_3b_1_1 = conv_2d(self.inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_1_1')
    self.inception_3b_3_3_reduce = conv_2d(self.inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce')
    self.inception_3b_3_3 = conv_2d(self.inception_3b_3_3_reduce, 192, filter_size=3, activation='relu', name='inception_3b_3_3')
    self.inception_3b_5_5_reduce = conv_2d(self.inception_3a_output, 32, filter_size=1, activation='relu', name='inception_3b_5_5_reduce')
    # NOTE(review): unlike every other 5x5 branch this conv has no
    # activation (tflearn defaults to linear). Left unchanged because fixing
    # it would invalidate previously trained checkpoints — confirm intent.
    self.inception_3b_5_5 = conv_2d(self.inception_3b_5_5_reduce, 96, filter_size=5,  name='inception_3b_5_5')
    self.inception_3b_pool = max_pool_2d(self.inception_3a_output, kernel_size=3, strides=1,  name='inception_3b_pool')
    self.inception_3b_pool_1_1 = conv_2d(self.inception_3b_pool, 64, filter_size=1, activation='relu', name='inception_3b_pool_1_1')
    self.inception_3b_output = merge([self.inception_3b_1_1, self.inception_3b_3_3, self.inception_3b_5_5, self.inception_3b_pool_1_1], mode='concat', axis=3, name='inception_3b_output')
    self.pool3_3_3 = max_pool_2d(self.inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3')

    # 4a
    self.inception_4a_1_1 = conv_2d(self.pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1')
    self.inception_4a_3_3_reduce = conv_2d(self.pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce')
    self.inception_4a_3_3 = conv_2d(self.inception_4a_3_3_reduce, 208, filter_size=3,  activation='relu', name='inception_4a_3_3')
    self.inception_4a_5_5_reduce = conv_2d(self.pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce')
    self.inception_4a_5_5 = conv_2d(self.inception_4a_5_5_reduce, 48, filter_size=5,  activation='relu', name='inception_4a_5_5')
    self.inception_4a_pool = max_pool_2d(self.pool3_3_3, kernel_size=3, strides=1,  name='inception_4a_pool')
    self.inception_4a_pool_1_1 = conv_2d(self.inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1')
    self.inception_4a_output = merge([self.inception_4a_1_1, self.inception_4a_3_3, self.inception_4a_5_5, self.inception_4a_pool_1_1], mode='concat', axis=3, name='inception_4a_output')

    # 4b
    # BUG FIX: this layer was named 'inception_4a_1_1', duplicating the 4a
    # branch's layer name; corrected to 'inception_4b_1_1'.
    self.inception_4b_1_1 = conv_2d(self.inception_4a_output, 160, filter_size=1, activation='relu', name='inception_4b_1_1')
    self.inception_4b_3_3_reduce = conv_2d(self.inception_4a_output, 112, filter_size=1, activation='relu', name='inception_4b_3_3_reduce')
    self.inception_4b_3_3 = conv_2d(self.inception_4b_3_3_reduce, 224, filter_size=3, activation='relu', name='inception_4b_3_3')
    self.inception_4b_5_5_reduce = conv_2d(self.inception_4a_output, 24, filter_size=1, activation='relu', name='inception_4b_5_5_reduce')
    self.inception_4b_5_5 = conv_2d(self.inception_4b_5_5_reduce, 64, filter_size=5,  activation='relu', name='inception_4b_5_5')
    self.inception_4b_pool = max_pool_2d(self.inception_4a_output, kernel_size=3, strides=1,  name='inception_4b_pool')
    self.inception_4b_pool_1_1 = conv_2d(self.inception_4b_pool, 64, filter_size=1, activation='relu', name='inception_4b_pool_1_1')
    self.inception_4b_output = merge([self.inception_4b_1_1, self.inception_4b_3_3, self.inception_4b_5_5, self.inception_4b_pool_1_1], mode='concat', axis=3, name='inception_4b_output')

    # 4c
    self.inception_4c_1_1 = conv_2d(self.inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_1_1')
    self.inception_4c_3_3_reduce = conv_2d(self.inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_3_3_reduce')
    self.inception_4c_3_3 = conv_2d(self.inception_4c_3_3_reduce, 256,  filter_size=3, activation='relu', name='inception_4c_3_3')
    self.inception_4c_5_5_reduce = conv_2d(self.inception_4b_output, 24, filter_size=1, activation='relu', name='inception_4c_5_5_reduce')
    self.inception_4c_5_5 = conv_2d(self.inception_4c_5_5_reduce, 64,  filter_size=5, activation='relu', name='inception_4c_5_5')
    self.inception_4c_pool = max_pool_2d(self.inception_4b_output, kernel_size=3, strides=1)
    self.inception_4c_pool_1_1 = conv_2d(self.inception_4c_pool, 64, filter_size=1, activation='relu', name='inception_4c_pool_1_1')
    self.inception_4c_output = merge([self.inception_4c_1_1, self.inception_4c_3_3, self.inception_4c_5_5, self.inception_4c_pool_1_1], mode='concat', axis=3, name='inception_4c_output')

    # 4d
    self.inception_4d_1_1 = conv_2d(self.inception_4c_output, 112, filter_size=1, activation='relu', name='inception_4d_1_1')
    self.inception_4d_3_3_reduce = conv_2d(self.inception_4c_output, 144, filter_size=1, activation='relu', name='inception_4d_3_3_reduce')
    self.inception_4d_3_3 = conv_2d(self.inception_4d_3_3_reduce, 288, filter_size=3, activation='relu', name='inception_4d_3_3')
    self.inception_4d_5_5_reduce = conv_2d(self.inception_4c_output, 32, filter_size=1, activation='relu', name='inception_4d_5_5_reduce')
    self.inception_4d_5_5 = conv_2d(self.inception_4d_5_5_reduce, 64, filter_size=5,  activation='relu', name='inception_4d_5_5')
    self.inception_4d_pool = max_pool_2d(self.inception_4c_output, kernel_size=3, strides=1,  name='inception_4d_pool')
    self.inception_4d_pool_1_1 = conv_2d(self.inception_4d_pool, 64, filter_size=1, activation='relu', name='inception_4d_pool_1_1')
    self.inception_4d_output = merge([self.inception_4d_1_1, self.inception_4d_3_3, self.inception_4d_5_5, self.inception_4d_pool_1_1], mode='concat', axis=3, name='inception_4d_output')

    # 4e
    self.inception_4e_1_1 = conv_2d(self.inception_4d_output, 256, filter_size=1, activation='relu', name='inception_4e_1_1')
    self.inception_4e_3_3_reduce = conv_2d(self.inception_4d_output, 160, filter_size=1, activation='relu', name='inception_4e_3_3_reduce')
    self.inception_4e_3_3 = conv_2d(self.inception_4e_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_4e_3_3')
    self.inception_4e_5_5_reduce = conv_2d(self.inception_4d_output, 32, filter_size=1, activation='relu', name='inception_4e_5_5_reduce')
    self.inception_4e_5_5 = conv_2d(self.inception_4e_5_5_reduce, 128,  filter_size=5, activation='relu', name='inception_4e_5_5')
    self.inception_4e_pool = max_pool_2d(self.inception_4d_output, kernel_size=3, strides=1,  name='inception_4e_pool')
    self.inception_4e_pool_1_1 = conv_2d(self.inception_4e_pool, 128, filter_size=1, activation='relu', name='inception_4e_pool_1_1')
    self.inception_4e_output = merge([self.inception_4e_1_1, self.inception_4e_3_3, self.inception_4e_5_5, self.inception_4e_pool_1_1], axis=3, mode='concat')
    self.pool4_3_3 = max_pool_2d(self.inception_4e_output, kernel_size=3, strides=2, name='pool_3_3')

    # 5a
    self.inception_5a_1_1 = conv_2d(self.pool4_3_3, 256, filter_size=1, activation='relu', name='inception_5a_1_1')
    self.inception_5a_3_3_reduce = conv_2d(self.pool4_3_3, 160, filter_size=1, activation='relu', name='inception_5a_3_3_reduce')
    self.inception_5a_3_3 = conv_2d(self.inception_5a_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_5a_3_3')
    self.inception_5a_5_5_reduce = conv_2d(self.pool4_3_3, 32, filter_size=1, activation='relu', name='inception_5a_5_5_reduce')
    self.inception_5a_5_5 = conv_2d(self.inception_5a_5_5_reduce, 128, filter_size=5,  activation='relu', name='inception_5a_5_5')
    self.inception_5a_pool = max_pool_2d(self.pool4_3_3, kernel_size=3, strides=1,  name='inception_5a_pool')
    self.inception_5a_pool_1_1 = conv_2d(self.inception_5a_pool, 128, filter_size=1, activation='relu', name='inception_5a_pool_1_1')
    self.inception_5a_output = merge([self.inception_5a_1_1, self.inception_5a_3_3, self.inception_5a_5_5, self.inception_5a_pool_1_1], axis=3, mode='concat')

    # 5b
    self.inception_5b_1_1 = conv_2d(self.inception_5a_output, 384, filter_size=1, activation='relu', name='inception_5b_1_1')
    self.inception_5b_3_3_reduce = conv_2d(self.inception_5a_output, 192, filter_size=1, activation='relu', name='inception_5b_3_3_reduce')
    self.inception_5b_3_3 = conv_2d(self.inception_5b_3_3_reduce, 384,  filter_size=3, activation='relu', name='inception_5b_3_3')
    self.inception_5b_5_5_reduce = conv_2d(self.inception_5a_output, 48, filter_size=1, activation='relu', name='inception_5b_5_5_reduce')
    self.inception_5b_5_5 = conv_2d(self.inception_5b_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5b_5_5')
    self.inception_5b_pool = max_pool_2d(self.inception_5a_output, kernel_size=3, strides=1,  name='inception_5b_pool')
    self.inception_5b_pool_1_1 = conv_2d(self.inception_5b_pool, 128, filter_size=1, activation='relu', name='inception_5b_pool_1_1')
    self.inception_5b_output = merge([self.inception_5b_1_1, self.inception_5b_3_3, self.inception_5b_5_5, self.inception_5b_pool_1_1], axis=3, mode='concat')
    self.pool5_7_7 = avg_pool_2d(self.inception_5b_output, kernel_size=7, strides=1)
    self.pool5_7_7 = dropout(self.pool5_7_7, 0.4)

    # fc
    self.loss = fully_connected(self.pool5_7_7, len(EMOTIONS), activation='softmax')
    self.network = regression(self.loss, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)

    self.model = tflearn.DNN(
      self.network,
      checkpoint_path = CHECKPOINT_DIR,
      max_checkpoints = 1,
      tensorboard_dir = TENSORBOARD_DIR,
      tensorboard_verbose = 1
    )
    # Weight restore is deliberately not invoked here (train from scratch);
    # call load_model() explicitly to resume.

  def load_saved_dataset(self):
    """Populate self.dataset from its saved snapshot."""
    self.dataset.load_from_save()
    print('[+] Dataset found and loaded')

  def start_training(self):
    """Load the dataset, build the network and fit the model."""
    self.load_saved_dataset()
    self.build_network()
    # (The original re-checked `if self.dataset is None` here; it is always
    # a DatasetLoader instance, so that branch never ran.)
    print('[+] Training network')

    print ("[+] Size train: " + str(len(self.dataset.images)))
    print ("[+] Size train-label: " + str(len(self.dataset.labels)))
    print ("[+] Size test: " + str(len(self.dataset.images_test)))
    print ("[+] Size test-label: " + str(len(self.dataset.labels_test)))

    self.model.fit(
      self.dataset.images, self.dataset.labels,
      # NOTE(review): `_labels_test` (underscored) vs `labels_test` above —
      # presumably DatasetLoader exposes both; confirm before unifying.
      validation_set = (self.dataset.images_test, self.dataset._labels_test),
      n_epoch = 500,
      batch_size = BATH_SIZE_CONSTANT,
      shuffle = True,
      show_metric = True,
      snapshot_step = 200,
      snapshot_epoch = True,
      run_id = EXPERIMENTO_LABEL
    )

  def predict(self, image):
    """Return emotion probabilities for one face image; None-safe."""
    if image is None:
      return None
    image = image.reshape([-1, SIZE_FACE, SIZE_FACE, COLOR])
    return self.model.predict(image)

  def save_model(self):
    """Persist the trained weights under MODEL_LABEL."""
    self.model.save(MODEL_LABEL)
    print('[+] Model trained and saved at ' + MODEL_LABEL )

  def load_model(self):
    """Restore weights from MODEL_LABEL."""
    self.model.load(MODEL_LABEL)
    print('[+] Model loaded from ' + MODEL_LABEL)
Beispiel #27
0
class EmotionRecognition:
  """Residual-network (ResNet, n=5 -> 32 layers) tflearn classifier for
  facial emotion recognition."""

  def __init__(self):
    # Contents populated later by load_saved_dataset().
    # (Removed the leftover `print("aqui1")` debug statement.)
    self.dataset = DatasetLoader()

  def build_network(self):
    """Build the residual network, wrap it in a DNN and restore weights."""
    # 32 layers: n=5, 56 layers: n=9, 110 layers: n=18
    n = 5
    #https://github.com/tflearn/tflearn/blob/master/examples/images/residual_network_cifar10.py
    print('[+] Building RESIDUAL NETWORK')
    print ('[-] COLOR: ' + str(COLOR))
    print('[-] BATH_SIZE' + str(BATH_SIZE_CONSTANT))
    print('[-] EXPERIMENTAL_LABEL' + EXPERIMENTO_LABEL)

    self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, COLOR])
    self.network = tflearn.conv_2d(self.network, 16, 3, regularizer='L2', weight_decay=0.0001)
    self.network = tflearn.residual_block(self.network, n, 16)
    self.network = tflearn.residual_block(self.network, 1, 32, downsample=True)
    self.network = tflearn.residual_block(self.network, n-1, 32)
    self.network = tflearn.residual_block(self.network, 1, 64, downsample=True)
    self.network = tflearn.residual_block(self.network, n-1, 64)
    self.network = tflearn.batch_normalization(self.network)
    self.network = tflearn.activation(self.network, 'relu')
    self.network = tflearn.global_avg_pool(self.network)
    # Regression
    self.network = tflearn.fully_connected(self.network, len(EMOTIONS), activation='softmax')
    self.mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    self.network = tflearn.regression(self.network, optimizer=self.mom,
                                      loss='categorical_crossentropy')

    self.model = tflearn.DNN(
      self.network,
      checkpoint_path = CHECKPOINT_DIR,
      max_checkpoints = 1,
      tensorboard_dir = TENSORBOARD_DIR,
      tensorboard_verbose = 1
    )
    self.load_model()

  def load_saved_dataset(self):
    """Populate self.dataset from its saved snapshot."""
    self.dataset.load_from_save()
    print('[+] Dataset found and loaded')

  def start_training(self):
    """Load the dataset, build the network and fit the model."""
    self.load_saved_dataset()
    self.build_network()
    # (The original re-checked `if self.dataset is None` here; it is always
    # a DatasetLoader instance, so that branch never ran.)
    print('[+] Training network')

    print ("[+] Size train: " + str(len(self.dataset.images)))
    print ("[+] Size train-label: " + str(len(self.dataset.labels)))
    print ("[+] Size test: " + str(len(self.dataset.images_test)))
    print ("[+] Size test-label: " + str(len(self.dataset.labels_test)))

    self.model.fit(
      self.dataset.images, self.dataset.labels,
      # NOTE(review): `_labels_test` (underscored) vs `labels_test` above —
      # presumably DatasetLoader exposes both; confirm before unifying.
      validation_set = (self.dataset.images_test, self.dataset._labels_test),
      n_epoch = 100,
      batch_size = BATH_SIZE_CONSTANT,
      shuffle = True,
      show_metric = True,
      snapshot_step = 200,
      snapshot_epoch = True,
      run_id = EXPERIMENTO_LABEL
    )

  def predict(self, image):
    """Return emotion probabilities for one face image; None-safe."""
    if image is None:
      return None
    image = image.reshape([-1, SIZE_FACE, SIZE_FACE, COLOR])
    return self.model.predict(image)

  def save_model(self):
    """Persist the trained weights under MODEL_LABEL."""
    self.model.save(MODEL_LABEL)
    print('[+] Model trained and saved at ' + MODEL_LABEL )

  def load_model(self):
    # Deliberately loads a fixed snapshot rather than MODEL_LABEL.
    self.model.load('model-full-data/resnet-full-data-33201')
    # BUG FIX: the message claimed MODEL_LABEL was loaded; report the path
    # that is actually loaded.
    print('[+] Model loaded from model-full-data/resnet-full-data-33201')
Beispiel #28
0
class EmotionRecognition:
    """7-class emotion CNN (tflearn) trained on SIZE_FACE x SIZE_FACE
    grayscale face crops; prints a confusion matrix after training."""

    def __init__(self):
        # Contents populated later by load_saved_dataset().
        self.dataset = DatasetLoader()

    def build_network(self):
        """Build the 4-conv CNN, wrap it in a DNN and try to restore weights."""
        padding = 'SAME'
        print(' ')
        print('----------------- Building CNN -----------------')
        print(' ')
        self.network = tflearn.input_data(
            shape=[None, SIZE_FACE, SIZE_FACE, 1])

        conv_1 = tflearn.relu(
            conv_2d(self.network,
                    96,
                    3,
                    strides=1,
                    bias=True,
                    padding=padding,
                    activation=None,
                    name='Conv_1'))
        maxpool_1 = tflearn.max_pool_2d(conv_1,
                                        3,
                                        strides=2,
                                        padding=padding,
                                        name='MaxPool_1')
        maxpool_1 = tflearn.batch_normalization(maxpool_1)

        conv_2 = tflearn.relu(
            conv_2d(maxpool_1,
                    108,
                    2,
                    strides=1,
                    padding=padding,
                    name='Conv_2'))
        maxpool_2 = tflearn.max_pool_2d(conv_2,
                                        2,
                                        strides=1,
                                        padding=padding,
                                        name='MaxPool_2')
        maxpool_2 = tflearn.batch_normalization(maxpool_2)

        conv_3 = tflearn.relu(
            conv_2d(maxpool_2,
                    208,
                    2,
                    strides=1,
                    padding=padding,
                    name='Conv_3'))
        conv_4 = tflearn.relu(
            conv_2d(conv_3, 64, 2, strides=1, padding=padding, name='Conv_4'))
        maxpool_3 = tflearn.max_pool_2d(conv_4,
                                        2,
                                        strides=1,
                                        padding=padding,
                                        name='MaxPool_3')
        maxpool_3 = tflearn.batch_normalization(maxpool_3)

        net = tflearn.flatten(maxpool_3, name='Net')
        net = tflearn.dropout(net, 0.1)

        final_1 = tflearn.fully_connected(net, 512, activation='relu')
        final_1 = tflearn.dropout(final_1, 0.5)

        final_2 = tflearn.fully_connected(final_1, 256, activation='relu')
        final_2 = tflearn.dropout(final_2, 0.5)

        Loss = tflearn.fully_connected(final_2,
                                       7,
                                       activation='softmax',
                                       name='Total_loss')

        self.network = tflearn.regression(Loss,
                                          optimizer='Adam',
                                          loss='categorical_crossentropy',
                                          learning_rate=0.0001)
        self.model = tflearn.DNN(self.network,
                                 tensorboard_verbose=0,
                                 tensorboard_dir=os.getcwd() + '/checkpoint',
                                 checkpoint_path='./data/' +
                                 '/emotion_recognition',
                                 max_checkpoints=None)
        self.load_model()

    def load_saved_dataset(self):
        """Populate self.dataset from its saved snapshot."""
        self.dataset.load_from_save()
        print(' ')
        print('----------------- Dataset found and loaded -----------------')
        print(' ')

    def start_training(self):
        """Load the dataset, build the network, fit the model and report
        a confusion matrix / classification report on the test split."""
        self.load_saved_dataset()
        self.build_network()
        # (Removed the dead `if self.dataset is None` re-load and the
        # leftover `print('hello world')` debug statement.)
        print(' ')
        print('----------------- Training network -----------------')
        print(' ')
        self.model.fit(self.dataset.images,
                       self.dataset.labels,
                       n_epoch=140,
                       # NOTE(review): `_labels_test` (underscored) —
                       # presumably DatasetLoader exposes it; confirm.
                       validation_set=(self.dataset.images_test,
                                       self.dataset._labels_test),
                       show_metric=True,
                       batch_size=100,
                       run_id='emotion_recognition')

        # Evaluate once. (The original called predict twice and discarded
        # the first result.)
        # NOTE(review): assumes predict returns an ndarray supporting
        # .argmax(axis=1) — confirm for the tflearn version in use.
        predictions = self.model.predict(self.dataset.images_test)
        matrix = confusion_matrix(self.dataset._labels_test.argmax(axis=1),
                                  predictions.argmax(axis=1))
        print(matrix)
        print(
            classification_report(self.dataset._labels_test.argmax(axis=1),
                                  predictions.argmax(axis=1),
                                  target_names=EMOTIONS))

    def load_model(self):
        """Restore weights from ./CNN_Trained_model if present."""
        if isfile("CNN_Trained_model.meta"):
            self.model.load("CNN_Trained_model")

            print(' ')
            print('----------------- Model loaded -----------------')
            print(' ')
        else:
            print(' ')
            print('----------------- Can not load the model -----------------')
            print(' ')

    def save_model(self):
        """Persist the trained weights under SAVE_DIRECTORY."""
        self.model.save(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
        print(' ')
        print(
            '------------------ Model trained and saved ----------------------'
        )
        print(' ')

    def predict(self, image):
        """Return emotion probabilities for one face image; None-safe."""
        if image is None:
            return None
        image = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
        return self.model.predict(image)
Beispiel #29
0
import numpy as np
import time
from keras.preprocessing.image import save_img
from keras.applications.vgg16 import VGG16
from keras.models import load_model
from keras import backend as K

from dataset_loader import DatasetLoader
from constants import *

# Dimensions of the generated pictures for each filter.
# NOTE(review): img_width/img_height are not used in the visible code —
# presumably consumed by quiver or a removed section; verify before deleting.
img_width = 32
img_height = 32

# Load dataset
# Comment/uncomment to select dataset to use
dataset = DatasetLoader()

# CK Extended no resize
dataset.ck_extended_load_from_save()
classes = CK_EXTENDED_EMOTIONS

# Load model
# Restores a pre-trained Keras Inception-V3 model from MODELS_PATH.
model = load_model(MODELS_PATH + 'model_ck_extended_inception_v3_1.h5')
print ('[+] Model loaded')
print (model.summary())

# Deferred import: quiver spins up a web server, so it is only imported
# once the model is ready to serve.
from quiver_engine.server import launch
launch(model, classes=classes, input_folder='./imgs')
Beispiel #30
0
 # Entry-point fragment: configure logging, read the experiment parameters
 # from a YAML file given on the command line, and set up a CNN regressor
 # for one gender's data (leave-one-subject-out cross-validation log name).
 logging.basicConfig(level=logging.DEBUG,
                     filename="dcnn_regression_gender_losocv.log",
                     format="%(asctime)s %(levelname)s %(message)s")
 # First CLI argument is the path to the YAML parameter file.
 yaml_file_path = sys.argv[1]
 parameters = load_parameters(yaml_file_path)
 # Each parameter value is wrapped in a one-element list; [0] unwraps it.
 labels_file_path = parameters["labels_file_path"][0]
 input_folder_path = parameters["input_folder_path"][0]
 psychological_construct = parameters["psychological_construct"][0]
 epochs = parameters["epochs"][0]
 likert_score_file_path = parameters["likert_scores_file_path"][0]
 loss_function = parameters["loss_function"][0]
 demographics_file_path = parameters["demographics_file"][0]
 gender = parameters["gender"][0]
 print(gender)
 dataset_loader = DatasetLoader(
     labels_file_path=labels_file_path,
     input_folder_path=input_folder_path,
     likert_scores_file_path=likert_score_file_path)
 # Column/target index for the requested psychological construct.
 index = dataset_loader.get_index(psychological_construct)
 labels = dataset_loader.labels
 # Keep only the students matching the requested gender.
 students_gender_train = filter_students(labels.keys(),
                                         demographics_file_path, gender)
 print(students_gender_train)
 # Allow TF to grow GPU memory on demand instead of grabbing it all.
 run_config = tf.ConfigProto()
 run_config.gpu_options.allow_growth = True
 with tf.Session(config=run_config) as sess:
     checkpoint_dir = parameters["checkpoint_dir"][0]
     checkpoint_dir = os.path.join(checkpoint_dir, psychological_construct)
     cnn_classifier = CNN(sess=sess,
                          epochs=epochs,
                          checkpoint_dir=checkpoint_dir,
                          loss=loss_function)
Beispiel #31
0
 def __init__(self):
     # Dataset wrapper; the actual data is loaded later by other methods.
     self.dataset = DatasetLoader()
Beispiel #32
0
class EmotionRecognition:
    def __init__(self):
        # Dataset wrapper; populated later by load_saved_dataset().
        self.dataset = DatasetLoader()

    def build_network(self):
        """Build the CNN, restore pretrained weights, and round-trip the
        second convolution layer's weights through text files.

        Architecture: a smaller 'AlexNet'
        (https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py).

        Side effects:
            - Sets self.network / self.model.
            - Loads checkpoint 'emotion_recognition-17325' into self.model.
            - Writes the Conv2D_1 weights to convolution_layer2_weight.txt.
            - Reads convolution_layer2_weight_modify.txt back into a
              (5, 5, 64, 64) numpy array (printed; currently unused).

        NOTE(review): absolute Windows paths are hard-coded below; they
        should come from configuration/constants.
        """
        print('[+] Building CNN')
        self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
        self.network = conv_2d(self.network,
                               64,
                               5,
                               activation='relu',
                               padding='valid')
        self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = conv_2d(self.network,
                               64,
                               5,
                               activation='relu',
                               padding='valid')
        self.network = conv_2d(self.network,
                               128,
                               3,
                               activation='relu',
                               padding='valid')
        self.network = dropout(self.network, 0.3)
        self.network = fully_connected(self.network, 3072, activation='relu')
        self.network = fully_connected(self.network,
                                       len(EMOTIONS),
                                       activation='softmax')
        self.network = regression(self.network,
                                  optimizer='momentum',
                                  loss='categorical_crossentropy')
        self.model = tflearn.DNN(self.network,
                                 checkpoint_path=SAVE_DIRECTORY +
                                 '/emotion_recognition',
                                 max_checkpoints=1,
                                 tensorboard_verbose=2)
        self.model.load(
            'C:\\Users\\kingjy79\\Documents\\rlawhdduq1\\pn_emotion_copy3\\emotion\\data\\emotion_recognition-17325'
        )
        # 'Conv2D_1' is the second convolution layer; the lookup returns a
        # tf tensor, get_weights() then yields its values as a numpy array.
        convolution_layer2 = tflearn.variables.get_layer_variables_by_name(
            'Conv2D_1')[0]
        print(convolution_layer2)
        convolution_layer2_weight = self.model.get_weights(convolution_layer2)
        print(convolution_layer2_weight)
        np.savetxt(
            'C:\\Users\\kingjy79\\Documents\\rlawhdduq1\\pn_emotion_copy3\\emotion\\data\\convolution_layer2_weight.txt',
            X=convolution_layer2_weight.flatten(),
            fmt='%.6f')
        # Read the (possibly hand-edited) weights back: one float per line,
        # exactly 5*5*64*64 values matching Conv2D_1's kernel shape.
        weight_count = 5 * 5 * 64 * 64  # 102400
        with open(
                'C:\\Users\\kingjy79\\Documents\\rlawhdduq1\\pn_emotion_copy3\\emotion\\data\\convolution_layer2_weight_modify.txt',
                'r') as weight_file:
            convolution_layer2_weight_modify = [
                float(weight_file.readline()) for _ in range(weight_count)
            ]
        convolution_layer2_weight_modify = np.asarray(
            convolution_layer2_weight_modify)
        print('non reshape')
        print(convolution_layer2_weight_modify)
        convolution_layer2_weight_modify = np.reshape(
            convolution_layer2_weight_modify, [5, 5, 64, 64])
        print('reshape')
        print(convolution_layer2_weight_modify)
        # NOTE(review): the reshaped weights are only printed; a quantization
        # draft and set_weights() experiments were removed as dead code — use
        # self.model.set_weights(convolution_layer2, <tensor>) to apply them.

    def load_saved_dataset(self):
        """Load the preprocessed dataset from its saved form into self.dataset
        (train/test images and labels used by start_training)."""
        self.dataset.load_from_save()
        print('[+] Dataset found and loaded')

    def start_training(self):
        """Load the saved dataset, build the network, and fit the model.

        Fix: the original re-checked ``if self.dataset is None`` after
        already calling load_saved_dataset() unconditionally — dead code,
        since __init__ always sets self.dataset; the redundant branch is
        removed.
        """
        self.load_saved_dataset()
        self.build_network()
        # Training
        print('[+] Training network')
        self.model.fit(self.dataset.images,
                       self.dataset.labels,
                       validation_set=(self.dataset.images_test,
                                       self.dataset.labels_test),
                       n_epoch=100,
                       batch_size=50,
                       shuffle=True,
                       show_metric=True,
                       snapshot_step=200,
                       snapshot_epoch=True,
                       run_id='emotion_recognition')

    def predict(self, image):
        if image is None:
            return None
        image = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
        return self.model.predict(image)

    def save_model(self):
        """Persist the trained weights under SAVE_DIRECTORY."""
        destination = join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME)
        self.model.save(destination)
        print('[+] Model trained and saved at ' + SAVE_MODEL_FILENAME)

    def load_model(self):
        if isfile(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME)):
            self.model.load(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
            print('[+] Model loaded from ' + SAVE_MODEL_FILENAME)