Example #1
    def to(self, dev):
        # Move every tensor held by this split onto the target device.
        self.data = Data(self.data.x.to(dev), self.data.edge_index.to(dev))
        self.train_mask = self.train_mask.to(dev)
        self.train_pos_edge_mask = self.train_pos_edge_mask.to(dev)
        self.train_pos_edge_index = self.train_pos_edge_index.to(dev)
        self.test_pos_edge_index = self.test_pos_edge_index.to(dev)
        self._train_neg_edge_index = self._train_neg_edge_index.to(dev)
        self._test_neg_edge_index = self._test_neg_edge_index.to(dev)
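A minimal usage sketch (the `dataset` instance name, the device choice, and the `torch` import are assumptions, not part of the excerpt):

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dataset.to(device)  # hypothetical instance of the class defining to() above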
Example #2
    def reorder_node(self):
        x = torch.empty_like(self.data.x)

        # Assign the lowest indices to nodes of the observed subgraph,
        # then continue numbering with the remaining nodes.
        observed_node = list(self.observed_graph.nodes)
        self.__relabel_graph(x, observed_node, 0)
        left_node = set(self.graph.nodes) - set(observed_node)
        self.__relabel_graph(x, left_node, self.observed_index)

        self.observed_graph = self.__rebuild_graph(self.observed_graph)
        self.graph = self.__rebuild_graph(self.graph)

        all_edge = list(self.graph.edges)
        self.data = Data(x, torch.tensor(all_edge).transpose(0, 1))

        logging.info(
            "reordered graph; the observed subgraph now occupies the "
            "top-left corner of the adjacency matrix"
        )
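A self-contained sketch of the relabeling idea using plain networkx (a hypothetical standalone example, not the class's private helpers): give observed nodes the lowest indices so their subgraph lands in the top-left block of the adjacency matrix.

import networkx as nx

g = nx.path_graph(5)                          # nodes 0..4
observed = [2, 4]                             # nodes to pull into the top-left block
order = observed + [n for n in g.nodes if n not in observed]
mapping = {old: new for new, old in enumerate(order)}
g2 = nx.relabel_nodes(g, mapping)             # observed nodes become 0 and 1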
Example #3
def TeacherTest():
    correct = 0
    net = t.load('./TNet')
    net.cuda()
    net.eval()  # evaluation mode: freeze dropout/batch-norm statistics
    test_data = Data(opt.data_path, mode='t10k')
    test_dataloader = DataLoader(test_data,
                                 batch_size=opt.batch_size,
                                 shuffle=False)  # shuffling is unnecessary at test time
    total = len(test_data)
    with t.no_grad():  # inference only, no gradients needed
        for img, label in test_dataloader:
            img = img.float().cuda()
            label = label.long().cuda()
            out = net(img)
            _, predict = t.max(out, 1)
            correct += (predict == label).sum().item()

    acc = 100.0 * correct / total
    print('correct:%s' % correct)
    print('Accuracy=%2.2f' % acc)
Example #4
def StudentTrain():
    Snet = S_Neural_net()
    Snet.train().cuda()
    Tnet = t.load('./TNet')
    Tnet.eval()
    train_data = Data(opt.data_path)
    train_dataloader = DataLoader(train_data,
                                  batch_size=opt.batch_size,
                                  shuffle=True)
    criterion = nn.CrossEntropyLoss()
    loss_fn = nn.KLDivLoss()

    optimizer = t.optim.SGD(Snet.parameters(), lr=opt.lr_1)
    for epoch in range(opt.max_epoch):
        print('current epoch:%s' % epoch)
        for i, (img, label) in enumerate(train_dataloader):
            optimizer.zero_grad()
            img = img.float().cuda()
            label = label.long().cuda()

            # Compute each network's logits once and reuse them; the
            # deprecated Variable wrapper is no longer needed.
            s_logits = Snet(img)
            with t.no_grad():  # the teacher is frozen during distillation
                t_logits = Tnet(img)

            # Earlier loss formulation, kept for reference:
            # T_probe = nn.functional.softmax(t_logits / opt.T, dim=1)
            # S_probe_1 = nn.functional.softmax(s_logits / opt.T, dim=1)
            # loss_1 = (opt.T)*(opt.T)*loss_fn(S_probe_1, T_probe)
            # S_probe_2 = nn.functional.softmax(s_logits, dim=1)
            # loss_2 = criterion(S_probe_2, label)
            # StudentLoss = (1-opt.lamda)*loss_1 + opt.lamda*loss_2

            StudentLoss = distillation(s_logits,
                                       label,
                                       t_logits,
                                       T=20,
                                       alpha=0.7)
            StudentLoss.backward()
            optimizer.step()
            if i % 10 == 0:
                print('student_loss:%5.5f' % StudentLoss.item())
    t.save(Snet, 'student_net')
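The `distillation` helper is not shown in this excerpt. A minimal sketch of the usual Hinton-style soft/hard loss mix (the exact weighting convention here is an assumption, not the author's confirmed code):

def distillation(student_logits, labels, teacher_logits, T, alpha):
    # Softened KL term against the teacher, scaled by T^2 so gradient
    # magnitudes stay comparable across temperatures.
    soft = nn.KLDivLoss(reduction='batchmean')(
        nn.functional.log_softmax(student_logits / T, dim=1),
        nn.functional.softmax(teacher_logits / T, dim=1)) * (T * T)
    # Hard-label cross-entropy on the raw student logits.
    hard = nn.functional.cross_entropy(student_logits, labels)
    return alpha * soft + (1.0 - alpha) * hard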
Example #5
def TeacherTrain():
    net = T_Neural_net()
    net.cuda()
    train_data = Data(opt.data_path)
    train_dataloader = DataLoader(train_data,
                                  batch_size=opt.batch_size,
                                  shuffle=True)
    criterion = nn.CrossEntropyLoss()
    optimizer = t.optim.SGD(net.parameters(), lr=opt.lr)
    for epoch in range(opt.max_epoch):
        print('current epoch:%s' % epoch)
        for i, (img, label) in enumerate(train_dataloader):
            optimizer.zero_grad()
            img = img.float().cuda()
            label = label.long().cuda()
            output = net(img)
            loss = criterion(output, label)
            loss.backward()
            optimizer.step()
            if i % 20 == 0:
                print('loss:%5.5f' % loss.item())  # .item() replaces the old .data[0]
    t.save(net, 'TNet')
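`t.save(net, 'TNet')` pickles the whole module, which ties the checkpoint to the `T_Neural_net` class definition. A state_dict round-trip (a sketch, not from the original script) is the more portable pattern:

t.save(net.state_dict(), 'TNet_state.pth')

net2 = T_Neural_net()
net2.load_state_dict(t.load('TNet_state.pth'))
net2.cuda().eval()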
Example #6
import tensorflow as tf

gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
  try:
    tf.config.experimental.set_memory_growth(gpus[0], True)
  except RuntimeError as e:
    print(e)

from tensorflow.keras.layers import *
from tensorflow.keras.activations import *
from tensorflow.keras.models import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.initializers import *
from tensorflow.keras.callbacks import *

from Dataset import Data


data = Data(extracting_images=True)
data.data_augmentation(augment_size=1200)
x_train_splitted, x_val, y_train_splitted, y_val = data.get_splitted_train_validation_set()
x_train, y_train = data.get_train_set()
x_test, y_test = data.get_test_set()
num_classes = data.num_classes

# Define the CNN
def model_cnn(optimizer, learning_rate, dropout_rate,
              filter_block1, kernel_size_block1, 
              filter_block2, kernel_size_block2, 
              kernel_size_block3, filter_block3, 
              dense_layer_size, kernel_initializer, 
              bias_initializer, activation_str):
    """Creates the CNN model
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard

# Fix cuDNN problem
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        tf.config.experimental.set_memory_growth(gpus[0], True)
    except RuntimeError as e:
        print(e)

from sklearn.model_selection import train_test_split
from ktrain import text  # `text.texts_from_df` below comes from ktrain

from Dataset import Data

data = Data(language='en', creating_parquet=False)  # 'de'
classes = data.get_num_classes()
data.preprocess_labels()
dataframe = data.dataframe
labels = list(dataframe.columns.values)
labels = [label for label in labels if label not in ['text', 'label']]


(x_train, y_train), (x_test, y_test), preproc = \
    text.texts_from_df(dataframe,
                       text_column='text',
                       label_columns=labels,
                       maxlen=200,
                       max_features=3500,
                       preprocess_mode='bert')
Example #8
    loss_dir = -1 * loss_dir_batch / batch_size
    return loss_dir

if __name__ == '__main__':
    os.environ["CUDA_VISIBLE_DEVICES"] ='0'

    dataset_path = './voxel'
    train_scene_txt = os.path.join(dataset_path ,'train.txt')
    val_scene_txt = os.path.join(dataset_path ,'val.txt')

    train_scenes = read_txt(train_scene_txt)
    val_scenes = read_txt(val_scene_txt)

    _dataset_path = os.path.join(dataset_path, 'voxel')
    train_data = Data(_dataset_path, train_scenes, val_scenes, mode='train')
    val_data = Data(_dataset_path, train_scenes, val_scenes, mode='val')
    train_dataloader = torch.utils.data.DataLoader(train_data, batch_size=FLAGS.batchsize,
                                                   shuffle=True, num_workers=10)
    val_dataloader = torch.utils.data.DataLoader(val_data, batch_size=FLAGS.batchsize,
                                                 shuffle=False, num_workers=10)

    mtml = MTML().cuda()
    mtml = torch.nn.DataParallel(mtml, device_ids=[0])
    optim_params = [
        {'params': mtml.parameters(), 'lr': FLAGS.learning_rate, 'betas': (0.9, 0.999), 'eps': 1e-08},
    ]
    # The excerpt referenced undefined `learning_rate` and `Weight_Decay`
    # globals; FLAGS.learning_rate and FLAGS.weight_decay are assumed here.
    optimizer = optim.Adam(optim_params, lr=FLAGS.learning_rate, weight_decay=FLAGS.weight_decay)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.5)

    # Ratio of loss function
Example #9
"""Script to train a topic model
"""
import ktrain
from Dataset import Data

data = Data(language='en', creating_parquet=False)
df = data.dataframe  # avoid rebinding `data` to its own dataframe

tm = ktrain.text.get_topic_model(df['text'], n_features=150)
tm.print_topics()
tm.build(df['text'], threshold=0.2)
texts = tm.filter(df['text'])
categories = tm.filter(df['label'])
tm.print_topics(show_counts=True)
tm.save('text_classifier/models/english_LDA/')
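To reuse the saved topic model later, recent ktrain versions provide a matching loader (a hedged sketch; note the model must be rebuilt on the texts before filtering again):

tm = ktrain.text.load_topic_model('text_classifier/models/english_LDA/')
tm.build(df['text'], threshold=0.2)  # rebuild before calling tm.filter again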

Example #10
import numpy as np
import matplotlib.pyplot as plt
from Dataset import Data
from KalmanFilter import KalmanFilter
from Metric import MSE

# Dataset Values
dataset = Data('posicion.dat', 'velocidad.dat', 'aceleracion.dat')
position = dataset.get_position()
velocity = dataset.get_velocity()
acceleration = dataset.get_acceleration()

# Initial Conditions
x0 = np.array([
    10.7533, 36.6777, -45.1769, 1.1009, -17.0, 35.7418, -5.7247, 3.4268, 5.2774
])
p0 = np.diag(np.array([100, 100, 100, 1, 1, 1, 0.1, 0.1, 0.1]))

# Input Matrix
b = np.eye(9)

# Sample time
h = 1

# Process Matrix
eye = np.eye(3)
a_1 = np.hstack((eye, eye * h, eye * ((h**2) * 0.5)))
a_2 = np.hstack((np.zeros(eye.shape), eye, eye * h))
a_3 = np.hstack((np.zeros(eye.shape), np.zeros(eye.shape), eye))
a = np.vstack((a_1, a_2, a_3))
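A quick sanity check, not part of the original script: with h = 1, one step of the constant-acceleration model x_{k+1} = a @ x_k should add the velocity plus half the acceleration to each position component.

xk = np.zeros(9)
xk[3:6] = 2.0   # velocity of 2 on each axis
xk[6:9] = 4.0   # acceleration of 4 on each axis
xk1 = a @ xk
assert np.allclose(xk1[0:3], 2.0 * h + 0.5 * 4.0 * h ** 2)  # new position = 4
assert np.allclose(xk1[3:6], 2.0 + 4.0 * h)                 # new velocity = 6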
Example #11
import tensorflow as tf

gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
  try:
    tf.config.experimental.set_memory_growth(gpus[0], True)
  except RuntimeError as e:
    print(e)

from tensorflow.keras.layers import *
from tensorflow.keras.activations import *
from tensorflow.keras.models import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.initializers import *
from tensorflow.keras.callbacks import *
 
from Dataset import Data

num_words = 3500
maxlen = 200
embedding_dim = 100
data = Data(language='en', creating_parquet=True)
classes = data.get_num_classes()
data.preprocess_labels()
data.preprocess_texts(num_words=num_words, maxlen=maxlen)
data.split_data(test_size=0.25)
x_train, y_train = data.get_train_set()
x_test, y_test = data.get_test_set()

def model_lstm(optimizer, learning_rate,
               num_words, embedding_dim,
               maxlen, num_classes):
    
    # Input
    input_text = Input(shape=x_train.shape[1:])
    # Embedding
    x = Embedding(input_dim=num_words, output_dim=embedding_dim, input_length=maxlen)(input_text)