Example #1
def predictor():

    # Get predictions from model
    model()

    # Empty rankings list to pass to the HTML template
    the_rankings = []

    with open('predictions.csv') as predictions:
        rankings = csv.reader(predictions)

        for row in rankings:
            the_rankings.append(row)

    # Length of "the_rankings", passed to the template's for loop
    length = len(the_rankings)

    # Possible words to choose from to make website a little more interactive
    intro = [
        "Currently", "At this point", "At present", "Right now", "For now",
        "As of now", "Well"
    ]
    intro_next = [
        "it seems like", "it seems as though", "it looks like",
        "chances are that"
    ]

    intro_for_predictor = random.choice(intro)
    intro_next_for_predictor = random.choice(intro_next)

    team1 = the_rankings[1][1]
    team2 = the_rankings[2][1]
    team3 = the_rankings[3][1]
    team4 = the_rankings[4][1]
    team5 = the_rankings[5][1]
    team6 = the_rankings[6][1]

    team3rdlast = the_rankings[-3][1]
    team2ndlast = the_rankings[-2][1]
    teamlast = the_rankings[-1][1]

    return render_template("predictor.html",
                           the_rankings=the_rankings,
                           length=length,
                           intro_for_predictor=intro_for_predictor,
                           intro_next_for_predictor=intro_next_for_predictor,
                           team1=team1,
                           team2=team2,
                           team3=team3,
                           team4=team4,
                           team5=team5,
                           team6=team6,
                           team3rdlast=team3rdlast,
                           team2ndlast=team2ndlast,
                           teamlast=teamlast)
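
Note: the render_template call implies this function is a Flask view. A minimal sketch of the surrounding wiring it assumes (the imports, app object, and route path below are illustrative assumptions, not part of the original example):

# Hypothetical setup for the view above; names and the route path are assumptions.
import csv
import random
from flask import Flask, render_template

app = Flask(__name__)
app.add_url_rule("/predictor", view_func=predictor)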
Example #2
    def __init__(self,data,variance_threshold = 1e3,alpha = 1e-2,max_source = 10,epochs = 500,\
        center_movement_allow = 0.1,scale = 20,center_num = 1,method = 'Okumura_Hata',sample_num = None):
        '''
        alpha: update scaling factor, similar to a learning rate
        max_source: maximum number of emission sources; the number of split-off centers cannot exceed this value
        epochs: number of iterations when searching for the emission sources
        center_movement_allow (km): minimum allowed center movement; below this value the source is considered found and iteration stops
        scale: factor by which points in the scatter-plot visualization are shrunk
        method: propagation model to use, one of 'free', 'Okumura_Hata' or 'Egli'
        sample_num: number of monitoring points; computed automatically from data by default
        '''
        self.__alpha = alpha  # update scaling factor, similar to a learning rate
        self.__max_source = max_source  # maximum number of emission sources
        self.__epochs = epochs  # number of iterations
        self.__center_movement_allow = center_movement_allow  # stop iterating once the centers move less than this
        self.__scale = scale  # shrink factor for points in the scatter-plot visualization
        self.__method = method  # propagation model to use: 'free', 'Okumura_Hata' or 'Egli'
        self.__variance_threshold = variance_threshold  # variance threshold for adding a center
        self.__centers_num = center_num  # initial number of centers
        if sample_num is not None:
            self.point_num = sample_num  # number of samples (monitoring points), denoted n
        self.data = self.__init_data(data, sample_nums=sample_num)

        self.centers = self.__init_center(self.data)  # (1, 2) --> (m, 2)
        self.__model = model(launch_frequency=self.data[0, 2])  # assume all points share the same frequency
        self.center_power = []
        self.__point_power_predict = []
        self.loss = []
Example #3
def train(train_dir, test_dir):

    data_generator = ImageDataGenerator(rescale=1.0 / 255.0, zoom_range=0.2)

    batch_size = 32

    training_data = data_generator.flow_from_directory(directory=train_dir,
                                                       target_size=(64, 64),
                                                       batch_size=batch_size,
                                                       class_mode='binary')

    testing_data = data_generator.flow_from_directory(directory=test_dir,
                                                      target_size=(64, 64),
                                                      batch_size=batch_size,
                                                      class_mode='binary')

    mode = model(training_data)

    fitted_model = mode.fit_generator(training_data,
                                      steps_per_epoch=1000,
                                      epochs=20,
                                      validation_data=testing_data,
                                      validation_steps=1000)

    mode.save('final_model.h5')
Example #4
def create_model(sess, CONFIGS):

    text_model = model(CONFIGS)

    checkpt = tf.train.get_checkpoint_state(CONFIGS.ckpt_dir)
    if checkpt:
        print("Restoring old model parameters from %s" %
              checkpt.model_checkpoint_path)
        text_model.saver.restore(sess, checkpt.model_checkpoint_path)

    return text_model
Example #5
def kFLR(kFolds, learningRate):  # Splitting the Dataset
    X_split = np.split(X, kFolds)
    Y_split = np.split(target_df, kFolds)
    X_test = []
    X_train = []
    # Build the k train/test splits for the features
    # (the model itself is trained and tested per fold further below)
    for i in range(len(X_split)):
        X_intermediateTrain = []
        for j in range(len(X_split)):
            if i == j:
                X_test.append(X_split[j])
            else:
                X_intermediateTrain.append(X_split[j])
        X_train.append(X_intermediateTrain)
    X_trainSet = []
    for i in X_train:
        X_trainSet.append(np.matrix(pd.concat(i)))
    Y_test = []
    Y_train = []
    for i in range(len(Y_split)):
        Y_intermediateTrain = []
        for j in range(len(Y_split)):
            if i == j:
                Y_test.append(Y_split[j])
            else:
                Y_intermediateTrain.append(Y_split[j])

        Y_train.append(Y_intermediateTrain)
    Y_trainSet = []
    for i in Y_train:
        Y_trainSet.append(np.matrix(pd.concat(i)))

    MeanAbsoluteError = []
    for i in range(kFolds):
        X_trainSet[i] = np.array(X_trainSet[i]).T
        Y_trainSet[i] = np.array(Y_trainSet[i]).T
        X_test[i] = np.array(X_test[i]).T
        Y_test[i] = np.array(Y_test[i]).T
        MeanAbsoluteError.append(
            model(X_trainSet[i], Y_trainSet[i], X_test[i], Y_test[i],
                  learningRate, 150))

    totalSum = 0
    for i in MeanAbsoluteError:
        totalSum = totalSum + i

    MeanAbsoluteError = totalSum / kFolds
    acc = round((100 - MeanAbsoluteError), 2)
    return acc
Example #6
def test(model, iterator, criterion):
    model.eval()
    epoch_loss = 0
    epoch_bleu = 0
    with torch.no_grad():
        for _, (src, trg) in enumerate(iterator):
            src, trg = src.to(device), trg.to(device)

            output = model(src, trg, 0)  # turn off teacher forcing

            output = output[1:].view(-1, output.shape[-1])
            trg = trg[1:].view(-1)

            loss = criterion(output, trg)
            batch_bleu = bleu(output, trg)  # avoid shadowing the bleu() helper
            epoch_loss += loss.item()
            epoch_bleu += batch_bleu
    return epoch_loss / len(iterator), epoch_bleu / len(iterator)
Example #7
def train():

    # Import data
    img_batch, label_batch = distorted_inputs('C:\\Users\\Akira.DESKTOP-HM7OVCC\\Desktop\\data\\', 20)

    # Create the model
    x = tf.placeholder(tf.float32, [None, 64, 64, 3], name='input')

    # Define loss and optimizer
    y = tf.placeholder(tf.float32, [None, 4], name='label')

    # Build the graph for the deep net
    logits = model(x)

    with tf.name_scope("xent"):
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(
                logits=logits, labels=y), name="loss")
        tf.summary.scalar("loss", loss)

    with tf.name_scope("train"):
        train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)

    with tf.name_scope("accuracy"):
        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar("accuracy", accuracy)

    # tf.reset_default_graph()
    sess = tf.InteractiveSession()
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter('C:\\Users\\Akira.DESKTOP-HM7OVCC\\Desktop\\data\\', sess.graph)
    tf.train.start_queue_runners(sess=sess)

    for i in range(500):
        img, label = sess.run([img_batch, label_batch])
        if i % 10 == 0:
            train_accuracy = accuracy.eval(feed_dict={x: img, y: label})
            print('step %d, training accuracy %g' % (i, train_accuracy))
            result = sess.run(merged, feed_dict={x: img, y: label})
            writer.add_summary(result, i)
        sess.run(train_step, feed_dict={x: img, y: label})
    saver.save(sess, SAVE_PATH)
    print('Model stored...')
Example #8
def main():
    #loading the dataset from .h5 files
    #test_catvnoncat.h5
    #train_catvnoncat.h5
    train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset(
    )
    print(" no of training samples : {0}".format(train_set_y.shape[1]))
    print(" no of testing samples : {0}".format(test_set_y.shape[1]))
    print(" Height and width of an image in the sample : {0} {1} {2}".format(
        train_set_x_orig.shape[1], train_set_x_orig.shape[2],
        train_set_x_orig.shape[3]))
    ### reshaping the training and test data sets into
    ### train_set_x_flatten, where each column is an image sample and each row is a feature
    train_set_x_flatten = train_set_x_orig.reshape(
        train_set_x_orig.shape[0], train_set_x_orig.shape[1] *
        train_set_x_orig.shape[2] * train_set_x_orig.shape[3]).T
    test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0],
                                                 -1).T
    # print(train_set_x_flatten.shape, test_set_x_flatten.shape)
    # print("*****sanity checking *****")
    # print(str(train_set_x_flatten[:140,0]))
    # print(train_set_x_orig)

    # Standardize the dataset: normally you subtract the mean and divide by the standard deviation,
    # but since every pixel shares the same 0-255 range, dividing by 255 is enough here
    train_set_x = train_set_x_flatten / 255
    test_set_x = test_set_x_flatten / 255

    d = model(train_set_x,
              train_set_y,
              test_set_x,
              test_set_y,
              num_iterations=100000,
              learning_rate=0.4,
              print_cost=True)

    print(d["num_iterations"])
    print(d["learning_rate"])

    costs = np.squeeze(d['costs'])
    plt.plot(costs)
    plt.ylabel('Cost')
    plt.xlabel('Iterations (per hundreds)')
    plt.title("Learning rate =" + str(d["learning_rate"]))
    plt.show()
Example #9
def train(model, dataloader, dataset, device, optimizer, criterion):
    model.train()
    running_loss = 0.0
    counter = 0
    for i, data in tqdm(enumerate(dataloader),
                        total=int(len(dataset) / dataloader.batch_size)):
        counter += 1
        data = data[0]
        data = data.to(device)
        optimizer.zero_grad()
        reconstruction, mu, logvar = model(data)
        bce_loss = criterion(reconstruction, data)
        loss = final_loss(bce_loss, mu, logvar)
        loss.backward()
        running_loss += loss.item()
        optimizer.step()
    train_loss = running_loss / counter
    return train_loss
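
The final_loss helper used above is not shown; for a variational autoencoder it is typically the reconstruction loss plus the KL divergence of the latent Gaussian. A minimal sketch under that assumption (not necessarily the author's exact implementation):

import torch

def final_loss(bce_loss, mu, logvar):
    # Assumed form: reconstruction loss + KL divergence
    # KLD = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return bce_loss + kld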
Example #10
def train():
    loss_fn = torch.nn.NLLLoss()
    optimizer = torch.optim.Adam(lr=config.learning_rate, params=model.parameters())
    # torch.autograd.set_detect_anomaly(True)
    for i in range(config.EPOCH):
        for j in range(config.iter_per_epoch):
            hidden = model.init_hidden()
            optimizer.zero_grad()
            loss = 0
            category_tensor, name_tensor, target_tensor = loader.get_training_data()
            target_tensor.unsqueeze_(-1)
            for k in range(name_tensor.size(0)):
                output, hidden = model(category_tensor, name_tensor[k], hidden)
                # print(output.shape, target_tensor[k].shape)
                loss += loss_fn(output, target_tensor[k])
            loss.backward(retain_graph=True)
            optimizer.step()
            if j % 1000 == 0:
                print(loss)
Example #11
def validate(model, dataloader, dataset, device, criterion):
    model.eval()
    running_loss = 0.0
    counter = 0
    with torch.no_grad():
        for i, data in tqdm(enumerate(dataloader),
                            total=int(len(dataset) / dataloader.batch_size)):
            counter += 1
            data = data[0]
            data = data.to(device)
            reconstruction, mu, logvar = model(data)
            bce_loss = criterion(reconstruction, data)
            loss = final_loss(bce_loss, mu, logvar)
            running_loss += loss.item()

            # save the last batch input and output of every epoch
            if i == int(len(dataset) / dataloader.batch_size) - 1:
                recon_images = reconstruction
    val_loss = running_loss / counter
    return val_loss, recon_images
Example #12
    def init(self, r, c, b):
        self.model = model(r, c, b)
        self.view = view(self)
        self.view.numBombsRemaining.setText(str(
            self.model.getBombsRemaining()))
        self.view.restartButton.clicked.connect(self.restart)
        self.view.timer.timeout.connect(self.every_second)

        #print the actual value
        self.view.beginnerLeaderboard.setText(
            self.model.getManager().printDatas(
                self.model.getManager().beginner, "Easy"))
        self.view.intermediateLeaderboard.setText(
            self.model.getManager().printDatas(
                self.model.getManager().intermediate, "Intermediate"))
        self.view.expertLeaderboard.setText(self.model.getManager().printDatas(
            self.model.getManager().expert, "Expert"))

        #connecting the signals for the game rules
        for i in range(self.model.getNumRows() * self.model.getNumCols()):
            self.model.getIBox(i).bomb.connect(self.model.bombClicked)
            self.model.getIBox(i).bomb.connect(self.endgame)
            self.model.getIBox(i).flaggeds.connect(self.model.flagF)
            self.model.getIBox(i).flaggeds.connect(self.updateBombsLabel)
            self.model.getIBox(i).unflaggeds.connect(self.model.unflagF)
            self.model.getIBox(i).unflaggeds.connect(self.updateBombsLabel)
            self.model.getIBox(i).revealeds.connect(self.model.revealF)
            self.model.getIBox(i).emptybox.connect(self.model.emptyNeighbors)

        #connecting the signals for the menu actions
        self.view.easyMode.triggered.connect(self.easy)
        self.view.intermediateMode.triggered.connect(self.intermediate)
        self.view.expertMode.triggered.connect(self.expert)
        self.view.customMode.triggered.connect(self.showDialog)
        self.model.customsignal.connect(self.custom)

        #connecting the win game signal
        self.model.winsignal.connect(self.win)
Example #13
def test(category, begin):
    def _get_letter_tensor(letter):
        letter_tensor = torch.zeros(1, loader.n_letters)
        letter_tensor[0][loader.all_letters.find(letter)] = 1
        return letter_tensor.to(config.device)
    def _get_category_tensor(category):
        category_tensor = torch.zeros(1, loader.n_categories)
        category_tensor[0][loader.all_categories.index(category)] = 1
        return category_tensor.to(config.device)
    result = begin
    hidden = model.init_hidden()
    input_tensor = _get_letter_tensor(begin)
    category_tensor = _get_category_tensor(category)
    for i in range(config.max_length):
        output, hidden = model(category_tensor, input_tensor, hidden)
        topv, topi = output.topk(1)
        topi = topi[0][0].item()  # convert the 0-dim tensor to a plain int
        # print(topi)
        if topi == loader.n_letters - 1:
            break
        result += loader.all_letters[topi]
        input_tensor = _get_letter_tensor(result[-1])
    return result
Example #14
def evaluate(model: nn.Module,
             iterator: torch.utils.data.DataLoader,
             criterion: nn.Module):

    model.eval()

    epoch_loss = 0

    with torch.no_grad():

        for _, (src, trg) in enumerate(iterator):
            src, trg = src.to(device), trg.to(device)

            output = model(src, trg, 0) #turn off teacher forcing

            output = output[1:].view(-1, output.shape[-1])
            trg = trg[1:].view(-1)

            loss = criterion(output, trg)

            epoch_loss += loss.item()

    return epoch_loss / len(iterator)
Example #15
def create_model(sess, CONFIGS):
    text_model = model(CONFIGS)
    print("Created new model.")
    sess.run(tf.global_variables_initializer())

    return text_model
Example #16
print("db = " + str(grads["db"]))
print("cost = " + str(costs))

# predict
from Predict import predict

print("prediction = " + str(predict(w, b, X)))
print()

# model
from Model import model

d = model(X_train=train_set_x,
          Y_train=train_set_y,
          X_test=test_set_x,
          Y_test=test_set_y,
          num_iteration=2000,
          learn_rate=0.005,
          print_cost=True)
print()

# Example of a picture that was wrongly classified.
index = 5
plt.imshow(test_set_x[:, index].reshape((num_px, num_px, 3)))
# plt.show()
print("y = " + str(test_set_y[:, index]) + ", you pridict that it is a \"" +
      classes[int(d["Y_prediction_test"][0, index])].decode("utf-8") +
      "\" picture")

# plot the cost function and the gradient
# Plot learning curve (with costs)
Example #17
        target_model.model.set_weights(main_model.model.get_weights())
        main_model.target_update_counter = 0


if __name__ == '__main__':
    episode = 0
    epsilon = 1.0
    MIN = 0.0
    frame_idx = 0
    env = Environment(window_size=int(sys.argv[1]),
                      step_size=int(sys.argv[2]),
                      world_size=int(sys.argv[3]))
    show = int(sys.argv[4])
    state = env.reset()
    main_model = model(
        state.shape, INPUT_N, HIDDEN_N, ACTION_SPACE
    )  # Forward propagation. Uses current weights and performs our linear algebra
    target_model = model(
        state.shape, INPUT_N, HIDDEN_N, ACTION_SPACE
    )  # For training (Calculates gradients with backpropagation)
    target_model.model.set_weights(main_model.model.get_weights())

    while True:
        frame_idx += 1
        if show:
            env.render()

        reward, is_done = env.play_step(target_model,
                                        epsilon,
                                        view_live_progress=False)
        calc_loss(main_model, target_model, env, is_done)
Example #18
#import broadcast_model as bm
from Model import model
import numpy as np
from matplotlib import pyplot as plt
np.set_printoptions(suppress=True)


def get_random_num(k, b, dim0=30, dim1=2):
    return k * np.random.rand(dim0, dim1) + b


generater_model = model()


def rad(degree):
    return degree * np.pi / 180.0


def GetDistance(site0, site1):
    '''
    Convert latitude/longitude coordinates into an actual distance\n
    site0: center coordinates, shape (m, 2)\n
    site1: monitoring-point coordinates, shape (N, 2) or (N, 4)
    '''
    centers_num = site0.shape[0]
    dist = []
    for i in range(centers_num):
        center = site0[i, :]
        radlat1 = rad(center[0])
        radlat2 = rad(site1[:, 0])
        a = radlat1 - radlat2  #(N,)
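
The snippet is cut off here; judging from the latitude terms already computed, what follows is presumably the standard haversine great-circle distance. A self-contained sketch of that formula for reference (independent of the class above; an Earth radius of 6378.137 km is assumed):

import numpy as np

def haversine_km(lat1, lon1, lat2, lon2):
    # Great-circle distance between two lat/lon points, in kilometres.
    lat1, lon1, lat2, lon2 = map(np.radians, (lat1, lon1, lat2, lon2))
    a = np.sin((lat1 - lat2) / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin((lon1 - lon2) / 2) ** 2
    return 2 * 6378.137 * np.arcsin(np.sqrt(a))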
Example #19
def test():
    return 'Hello world!'

@app.route('/predict', methods=['POST'])
def predict():
    json_ = request.json

    #Check for presence of required fields
    reqDiffAct = set(required_fields) - set(json_.keys())
    if len(reqDiffAct) > 0:
        raise UnprocessableEntity('Missing fields: %s' % reqDiffAct, status_code=422)

    #Check types
    #Infill dummies

    df = pd.DataFrame({k:[v] for k,v in json_.items()})[required_fields]
    prediction = event_predictor.score(json_)

    return jsonify({'prediction':prediction})


if __name__ == '__main__':
    scorObj = joblib.load('Data/scoring_objects.pkl')
    required_fields = scorObj['required_fields']
    event_predictor = model(scorObj['rfr_search'],
                            scorObj['memberships_per_group'],
                            scorObj['maxRsvpLimit'],
                            scorObj['w2vModel'],
                            scorObj['vecSize'])

    app.run(host='0.0.0.0', port=80)
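
For reference, the /predict endpoint above expects a JSON body containing every name in required_fields, which is loaded from the pickle, so the exact keys are unknown here. A hedged client-side call might look like this (field names are placeholders):

# Illustrative request only; "field_a"/"field_b" stand in for the real required_fields.
import requests

payload = {"field_a": 1.0, "field_b": "some value"}
resp = requests.post("http://localhost:80/predict", json=payload)
print(resp.json())  # e.g. {"prediction": ...}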
Example #20
        # skip any image that fails to open (e.g. a corrupt jpeg)
        try:
            PIL_img = Image.open(imagePath).convert(
                'L')  # convert it to grayscale
        except:
            continue
        img_numpy = np.array(PIL_img, 'uint8')

        id = int(os.path.split(imagePath)[-1].split(".")[1])
        faceSamples.append(img_numpy)
        ids.append(id)
    return faceSamples, ids


_, ids = getImagesAndLabels()
model = model((32, 32, 1), len(set(ids)))
model.load_weights('trained_model.h5')
model.summary()

cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
font = cv2.FONT_HERSHEY_SIMPLEX


def start():
    cap = cv2.VideoCapture(0)
    print('here')
    ret = True

    clip = []
    while ret:
Example #21
file_glob = './data/testdata-*.tfrecord'
# test_iterator = get_rt_dataset(hparams, file_glob)

def nll_loss(iterator, model_fn):
    _origin, _incident, _normal, _Li, _image_rgb, _image_depth, _image_position  = iterator.get_next()
    _x = tf.concat([_origin, _incident, _normal], axis=1)
    _images = tf.concat([_image_rgb, _image_depth, _image_position], axis=-1)
    _distribution = model_fn(_x, _images)
    neg_log_loss = -tf.reduce_mean(_distribution.log_prob(_Li))
    return neg_log_loss

D = 3
d = ceil(D / 2)

_model_fn = model(D, d, hparams)

LOG_FREQUENCY = 1e0

steps_in_epoch = floor(TRAIN_TOTAL_SIZE / hparams.batch_size)
# t_steps_in_epoch = floor(TEST_TOTAL_SIZE / hparams.batch_size)
decay_steps = int(5e4)

global_step = tf.train.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(
    hparams.learning_rate, global_step, decay_steps, 0.95)
lr_summary = tf.summary.scalar('learning_rate', learning_rate)
optimizer = tf.train.AdamOptimizer(learning_rate)

_origin, _incident, _normal, _Li, _image_rgb, _image_depth, _image_position = train_iterator.get_next()
x = tf.concat([_origin, _incident, _normal], axis=1)
Example #22
# Collapse the 7-class one-hot labels into class 1 vs. everything else
for i in range(len(ty)):
    if (ty[i] == [0, 1, 0, 0, 0, 0, 0]).all():
        ty[i] = [0, 1, 0, 0, 0, 0, 0]
    else:
        ty[i] = [1, 0, 0, 0, 0, 0, 0]

weight = []
for i in range(len(graph)):
    iweight = []
    for j in graph[i]:
        iweight.append(1.0)
    weight.append(iweight)

print("Initial the model")
m = model(args)
m.add_data(x, y, graph, weight, 2, True)
m.build()
m.init_train(init_iter_graph=150)

print("Start training")
max_iters = 50000
max_accu, max_recall = 0, 0
y_score = None
for iter_cnt in range(max_iters):
    m.step_train(max_iter=1, iter_graph=0, iter_inst=1, iter_label=0)
    tpy = m.predict(tx)
    accu = comp_accu(tpy, ty)
    print(iter_cnt, accu, max_accu)
    if accu > max_accu:
        m.store_params()
Example #23
from Model import model

m = model("test.txt")
m.Uppercase()
m.Lowercase()
Example #24
def classify(treeDic, x_test, x_train, TDdroprate, BUdroprate, lr, weight_decay,
             patience, n_epochs, batchsize, dataname, iter, fold_count):

    unsup_model = Net(64, 3).to(device)

    for unsup_epoch in range(25):

        optimizer = th.optim.Adam(unsup_model.parameters(), lr=lr, weight_decay=weight_decay)
        unsup_model.train()
        traindata_list, _ = loadBiData(dataname, treeDic, x_train+x_test, x_test, 0.2, 0.2)
        train_loader = DataLoader(traindata_list, batch_size=batchsize, shuffle=True, num_workers=4)
        batch_idx = 0
        loss_all = 0
        tqdm_train_loader = tqdm(train_loader)
        for Batch_data in tqdm_train_loader:
            optimizer.zero_grad()
            Batch_data = Batch_data.to(device)
            loss = unsup_model(Batch_data)
            loss_all += loss.item() * (max(Batch_data.batch) + 1)

            loss.backward()
            optimizer.step()
            batch_idx = batch_idx + 1
        loss = loss_all / len(train_loader)
    name = "best_pre_"+dataname +"_4unsup" + ".pkl"
    th.save(unsup_model.state_dict(), name)
    print('Finished the unsupervised training.', '  Loss:', loss)
    print("Starting classification!")
    # unsup_model.eval()

    log_train = 'logs/' + dataname + '/' + 'train' + 'iter_' + str(iter)
    writer_train = SummaryWriter(log_train)
    log_test = 'logs/' + dataname + '/' + 'test' + 'iter_' + str(iter)
    writer_test = SummaryWriter(log_test)

    model = Classfier(64*3,64,4).to(device)
    opt = th.optim.Adam(model.parameters(), lr=0.0005, weight_decay=weight_decay)

    train_losses = []
    val_losses = []
    train_accs = []
    val_accs = []
    early_stopping = EarlyStopping(patience=10, verbose=True)
    for epoch in range(n_epochs):
        traindata_list, testdata_list = loadBiData(dataname, treeDic, x_train, x_test, TDdroprate,BUdroprate)
        train_loader = DataLoader(traindata_list, batch_size=batchsize, shuffle=True, num_workers=4)
        test_loader = DataLoader(testdata_list, batch_size=batchsize, shuffle=True, num_workers=4)
        avg_loss = []
        avg_acc = []
        batch_idx = 0
        tqdm_train_loader = tqdm(train_loader)
        model.train()
        unsup_model.train()
        for Batch_data in tqdm_train_loader:
            Batch_data.to(device)
            _, Batch_embed = unsup_model.encoder(Batch_data.x, Batch_data.edge_index, Batch_data.batch)
            out_labels= model(Batch_embed, Batch_data)
            finalloss=F.nll_loss(out_labels,Batch_data.y)
            loss=finalloss
            opt.zero_grad()
            loss.backward()
            avg_loss.append(loss.item())
            opt.step()
            _, pred = out_labels.max(dim=-1)
            correct = pred.eq(Batch_data.y).sum().item()
            train_acc = correct / len(Batch_data.y)
            avg_acc.append(train_acc)
            print("Iter {:03d} | Epoch {:05d} | Batch{:02d} | Train_Loss {:.4f}| Train_Accuracy {:.4f}".format(iter,epoch, batch_idx,
                                                                                                 loss.item(),
                                                                                                 train_acc))
            batch_idx = batch_idx + 1
            
        writer_train.add_scalar('train_loss', np.mean(avg_loss), global_step=epoch+1)
        writer_train.add_scalar('train_acc', np.mean(avg_acc), global_step=epoch+1)
        train_losses.append(np.mean(avg_loss))
        train_accs.append(np.mean(avg_acc))

        temp_val_losses = []
        temp_val_accs = []
        temp_val_Acc_all, temp_val_Acc1, temp_val_Prec1, temp_val_Recll1, temp_val_F1, \
        temp_val_Acc2, temp_val_Prec2, temp_val_Recll2, temp_val_F2, \
        temp_val_Acc3, temp_val_Prec3, temp_val_Recll3, temp_val_F3, \
        temp_val_Acc4, temp_val_Prec4, temp_val_Recll4, temp_val_F4 = [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []
        model.eval()
        unsup_model.eval()
        tqdm_test_loader = tqdm(test_loader)
        for Batch_data in tqdm_test_loader:
            Batch_data.to(device)
            Batch_embed = unsup_model.encoder.get_embeddings(Batch_data)
            val_out = model(Batch_embed, Batch_data)
            val_loss  = F.nll_loss(val_out, Batch_data.y)
            temp_val_losses.append(val_loss.item())
            _, val_pred = val_out.max(dim=1)
            correct = val_pred.eq(Batch_data.y).sum().item()
            val_acc = correct / len(Batch_data.y)
            Acc_all, Acc1, Prec1, Recll1, F1, Acc2, Prec2, Recll2, F2, Acc3, Prec3, Recll3, F3, Acc4, Prec4, Recll4, F4 = evaluation4class(
                val_pred, Batch_data.y)
            temp_val_Acc_all.append(Acc_all), temp_val_Acc1.append(Acc1), temp_val_Prec1.append(
                Prec1), temp_val_Recll1.append(Recll1), temp_val_F1.append(F1), \
            temp_val_Acc2.append(Acc2), temp_val_Prec2.append(Prec2), temp_val_Recll2.append(
                Recll2), temp_val_F2.append(F2), \
            temp_val_Acc3.append(Acc3), temp_val_Prec3.append(Prec3), temp_val_Recll3.append(
                Recll3), temp_val_F3.append(F3), \
            temp_val_Acc4.append(Acc4), temp_val_Prec4.append(Prec4), temp_val_Recll4.append(
                Recll4), temp_val_F4.append(F4)
            temp_val_accs.append(val_acc)
        writer_test.add_scalar('val_loss', np.mean(temp_val_losses), global_step=epoch+1)
        writer_test.add_scalar('val_accs', np.mean(temp_val_accs), global_step=epoch+1)
        val_losses.append(np.mean(temp_val_losses))
        val_accs.append(np.mean(temp_val_accs))
        print("Epoch {:05d} | Val_Loss {:.4f}| Val_Accuracy {:.4f}".format(epoch, np.mean(temp_val_losses),
                                                                           np.mean(temp_val_accs)))

        res = ['acc:{:.4f}'.format(np.mean(temp_val_Acc_all)),
               'C1:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc1), np.mean(temp_val_Prec1),
                                                       np.mean(temp_val_Recll1), np.mean(temp_val_F1)),
               'C2:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc2), np.mean(temp_val_Prec2),
                                                       np.mean(temp_val_Recll2), np.mean(temp_val_F2)),
               'C3:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc3), np.mean(temp_val_Prec3),
                                                       np.mean(temp_val_Recll3), np.mean(temp_val_F3)),
               'C4:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc4), np.mean(temp_val_Prec4),
                                                       np.mean(temp_val_Recll4), np.mean(temp_val_F4))]
        print('unsup_epoch:', (unsup_epoch+1) ,'   results:', res)
        early_stopping(np.mean(temp_val_losses), np.mean(temp_val_accs), np.mean(temp_val_F1), np.mean(temp_val_F2),
                       np.mean(temp_val_F3), np.mean(temp_val_F4), model, 'RDEA_'+str(fold_count)+'_', dataname)
        accs =np.mean(temp_val_accs)
        F1 = np.mean(temp_val_F1)
        F2 = np.mean(temp_val_F2)
        F3 = np.mean(temp_val_F3)
        F4 = np.mean(temp_val_F4)
        if epoch>=199:
            accs = early_stopping.accs
            F1 = early_stopping.F1
            F2 = early_stopping.F2
            F3 = early_stopping.F3
            F4 = early_stopping.F4
        if early_stopping.early_stop:
            print("Early stopping")
            accs=early_stopping.accs
            F1=early_stopping.F1
            F2 = early_stopping.F2
            F3 = early_stopping.F3
            F4 = early_stopping.F4
            break
    return train_losses , val_losses ,train_accs, val_accs,accs,F1,F2,F3,F4
Example #25
from DataIn import Data_in
from Data import GenData
from Datasex import SexData
from Model import model, graph
from RFCmodel import RFC
data = Data_in()

RFC(data)
#GenData(data)
#SexData(data)
model(data)
Example #26
sourceMax = -100
for i in range(len(article_list)):
    if len(article_list[i]) > sourceMax:
        sourceMax = len(article_list[i])

emb_matrix = np.zeros(
    (len(words_to_index) + 1, word_to_vec_map['go'].shape[0]),
    dtype=np.float32)
for i in range(1, len(words_to_index)):
    emb_matrix[i] = word_to_vec_map[str(index_to_words[i])].astype(np.float32)

max_target_sentence_length = max([len(sentence) for sentence in title_list])

train_graph = tf.Graph()
with train_graph.as_default():
    myModel = model()
    inputs, targets, target_sequence_length, max_target_len, source_sequence_length = myModel.enc_dec_model_inputs(
    )
    lr, keep_prob = myModel.hyperparam_inputs()

    train_logits, infer_logits = myModel.seq2seq(
        inputs, targets, KEEP_PROB, BATCH_SIZE,
        target_sequence_length, max_target_len, len(words_to_index),
        len(words_to_index), EMBEDDING_SIZE, EMBEDDING_SIZE, RNN_SIZE,
        NUM_LAYERS, words_to_index, source_sequence_length, emb_matrix)

    training_logits = tf.identity(train_logits.rnn_output, name='logits')
    inference_logits = tf.identity(infer_logits.sample_id, name='predictions')

    # https://www.tensorflow.org/api_docs/python/tf/sequence_mask
    # - Returns a mask tensor representing the first N positions of each cell.
Example #27
    if args.Mode == 'Init_Dir':

        create_dir(dict_dir)

    if args.Mode == 'Mask':

        create_mask('Train', args.Num, dict_dir)
        create_mask('Val', args.Num, dict_dir)

    if args.Mode == 'Run':

        # create validation set:
        X_val, y_val = create_dataset('Val', 2000, dict_dir)

        # compile
        model = model()
        print(model.summary())

        model.compile(optimizer='Adamax',
                      loss='binary_crossentropy',
                      metrics=['accuracy'])

        Model_Checkpoints = dict_dir[
            'Saved_Models'] + 'Checkpoint_' + now.strftime(
                "%d_%m_%H%M") + '.h5'

        callbacks = [
            EarlyStopping(monitor='val_loss',
                          patience=5,
                          verbose=1,
                          mode='auto',
Example #28
            PIL_img = Image.open(imagePath).convert('L')
        except:
            continue    
        img_numpy = np.array(PIL_img,'uint8')

        id = int(os.path.split(imagePath)[-1].split(".")[1])
        faceSamples.append(img_numpy)
        ids.append(id)
    return faceSamples,ids

print ("\n [INFO] Training faces now.")
faces,ids = getImagesAndLabels(path)

K.clear_session()
n_faces = len(set(ids))
model = model((32,32,1),n_faces)
faces = np.asarray(faces)
faces = np.array([downsample_image(ab) for ab in faces])
ids = np.asarray(ids)
faces = faces[:,:,:,np.newaxis]
print("Shape of Data: " + str(faces.shape))
print("Number of unique faces : " + str(n_faces))


ids = to_categorical(ids)

faces = faces.astype('float32')
faces /= 255.

x_train, x_test, y_train, y_test = train_test_split(faces,ids, test_size = 0.20, random_state = 0)
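
downsample_image is not defined in this snippet; given the (32, 32, 1) input shape declared for the model, it presumably resizes each grayscale face to 32x32. A sketch under that assumption:

import cv2

def downsample_image(img):
    # Assumed helper: shrink a face crop to the 32x32 input size the model expects.
    return cv2.resize(img, (32, 32), interpolation=cv2.INTER_AREA)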
Example #29
# pca visualization to get number of components
pca(X_train)

# dimensionality reduction
X_train_reduced, X_test_reduced = dimension_reduction('PCA', 20, X_train,
                                                      X_test)

# dealing with imbalanced class
X_train_smote, y_train_smote = class_imbalance(X_train_reduced, y_train)

# machine learning model
metrics = ['accuracy', 'f1', 'precision', 'recall', 'roc_auc']

# 1. Logistic Regression
# KFold cross validation
model_res = model('LR', 'KFold', metrics, X_train_smote, X_test_reduced,
                  y_train_smote)
# StratifiedKFold cross validation
model_res = model('LR', 'StratifiedKFold', metrics, X_train_smote,
                  X_test_reduced, y_train_smote)
# make prediction
prediction(model_res, 'Logistic Regression', X_train_smote, y_train_smote,
           X_test_reduced, y_test)

# 2. XGBoost
# KFold cross validation
model_res = model('XGB', 'KFold', metrics, X_train_smote, X_test_reduced,
                  y_train_smote)
# StratifiedKFold cross validation
model_res = model('XGB', 'StratifiedKFold', metrics, X_train_smote,
                  X_test_reduced, y_train_smote)
# make prediction
Example #30
for i in all_data:
    data = np.vstack((data, np.load(i)))
    print(str(x) + "/" + str(len(all_data)) + " items loaded!")
    x += 1
print("Data loaded!")

random.shuffle(data)  # Shuffle data (important when doing machine learning!)
train_data = data[HM_TEST:]
test_data = data[:HM_TEST]

train_x = [i[0] for i in train_data]
train_y = [i[1] for i in train_data]

test_x = [i[0] for i in test_data]
test_y = [i[1] for i in test_data]

model = model(WIDTH, HEIGHT, LR)

model.fit({'input_pic': train_x}, {'targets': train_y},
          n_epoch=EPOCHS,
          validation_set=({
              'input_pic': test_x
          }, {
              'targets': test_y
          }),
          snapshot_step=100,
          show_metric=True,
          batch_size=200,
          validation_batch_size=100)

model.save(MODEL_NAME)
Example #31
def project_tsne(params, dataset, pairs_x, pairs_y, dist, P_joint, device):
    print("---------------------------------")
    print("Begin finding the embedded space")

    net = model(params.col, params.output_dim)
    Project_DNN = init_model(net, device, restore=None)

    optimizer = optim.RMSprop(Project_DNN.parameters(), lr=params.lr)
    c_mse = nn.MSELoss()
    Project_DNN.train()

    dataset_num = len(dataset)

    for i in range(dataset_num):
        P_joint[i] = torch.from_numpy(P_joint[i]).float().to(device)
        dataset[i] = torch.from_numpy(dataset[i]).float().to(device)

    for epoch in range(params.epoch_DNN):
        len_dataloader = int(np.max(params.row) / params.batch_size)
        if len_dataloader == 0:
            len_dataloader = 1
            params.batch_size = np.max(params.row)
        for step in range(len_dataloader):
            KL_loss = []
            for i in range(dataset_num):
                random_batch = np.random.randint(0, params.row[i],
                                                 params.batch_size)
                data = dataset[i][random_batch]
                P_tmp = torch.zeros([params.batch_size,
                                     params.batch_size]).to(device)
                for j in range(params.batch_size):
                    P_tmp[j] = P_joint[i][random_batch[j], random_batch]
                P_tmp = P_tmp / torch.sum(P_tmp)
                low_dim_data = Project_DNN(data, i)
                Q_joint = Q_tsne(low_dim_data)

                KL_loss.append(torch.sum(P_tmp * torch.log(P_tmp / Q_joint)))

            feature_loss = np.array(0)
            feature_loss = torch.from_numpy(feature_loss).to(device).float()
            for i in range(dataset_num - 1):
                low_dim = Project_DNN(dataset[i][pairs_x[i]], i)
                low_dim_biggest_dataset = Project_DNN(
                    dataset[dataset_num - 1][pairs_y[i]],
                    len(dataset) - 1)
                feature_loss += c_mse(low_dim, low_dim_biggest_dataset)
                # min_norm = torch.min(torch.norm(low_dim), torch.norm(low_dim_biggest_dataset))
                # feature_loss += torch.abs(torch.norm(low_dim) - torch.norm(low_dim_biggest_dataset))/min_norm

            loss = params.beta * feature_loss
            for i in range(dataset_num):
                loss += KL_loss[i]

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        if (epoch + 1) % params.log_DNN == 0:
            print("epoch:[{:d}/{}]: loss:{:4f}, align_loss:{:4f}".format(epoch+1, \
             params.epoch_DNN, loss.data.item(), feature_loss.data.item()))

    integrated_data = []
    for i in range(dataset_num):
        integrated_data.append(Project_DNN(dataset[i], i))
        integrated_data[i] = integrated_data[i].detach().cpu().numpy()
    print("Done")
    return integrated_data
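
Q_tsne is referenced above but not defined in this snippet; in a t-SNE-style embedding it is the Student-t similarity matrix over the low-dimensional points. A sketch under that assumption (not necessarily the author's exact implementation):

import torch

def Q_tsne(y):
    # Assumed helper: pairwise Student-t (one degree of freedom) similarities,
    # normalized over all pairs, with the diagonal excluded.
    sq_dist = torch.cdist(y, y).pow(2)
    num = 1.0 / (1.0 + sq_dist)
    num.fill_diagonal_(0)
    return torch.clamp(num / torch.sum(num), min=1e-12)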