Example #1
 def forward(self, input_seq, input_length, max_length):
     reference = get_target(input_seq, start_nsw, end_nsw)[0]
     encoder_outputs, encoder_hidden = self.encoder(input_seq, input_length)
     decoder_hidden = encoder_hidden[:decoder.n_layers]
     decoder_input = torch.ones(1, 1, device=device,
                                dtype=torch.long) * SOS_token
     all_tokens = torch.zeros([0], device=device, dtype=torch.long)
     all_scores = torch.zeros([0], device=device)
     for _ in range(max_length):
         decoder_output, decoder_hidden = self.decoder(
             decoder_input, decoder_hidden, encoder_outputs)
         tmp = decoder_input[0]
         if tmp == 2:
             return all_tokens, all_scores
         if tmp == voc.word2index[' ']:
             if (len(reference) >= 1):
                 decoder_input = reference.pop(0).item()
                 if decoder_input == w_char:
                     decoder_input = u_char
             else:
                 decoder_input = 2  #### EOS
             decoder_input = torch.tensor([decoder_input], device=device)
         else:
             decoder_scores, decoder_input = torch.max(decoder_output,
                                                       dim=1)
         all_tokens = torch.cat((all_tokens, decoder_input), dim=0)
         all_scores = torch.cat((all_scores, decoder_scores), dim=0)
         decoder_input = torch.unsqueeze(decoder_input, 0)
     return all_tokens, all_scores
Example #2
def train(model, gpt_model, iterator, optimizer, criterion, args):
    total_loss = 0
    iter_num = 0
    train_acc = 0
    global iteration

    model.train()
    gpt_model.eval()
    
    if args.useKey == 'True':
        keyword = keyword_loader(args, 'train')
    
    for step, batch in enumerate(iterator):

        optimizer.zero_grad()
        enc_inputs = batch.que
    
        copy_dec_inputs = copy.deepcopy(batch.ans)
        copy_dec_target = copy.deepcopy(batch.ans)
        
        dec_inputs = get_dec_inputs(copy_dec_inputs, gpt_pad_token, gpt_eos_token)
        
        target_ = get_target(copy_dec_target, gpt_pad_token)
        target_ = target_.view(-1)
        
        
        with torch.no_grad():
            dec_inputs = gpt_model(dec_inputs)
    
        #segment_ids, valid_len = get_segment_ids_vaild_len(enc_inputs, pad_token_idx)
        #attention_mask = gen_attention_mask(enc_inputs, valid_len)
        

        if args.useKey == 'True':
            outputs = model(enc_inputs, dec_inputs, keyword[step])
        else:
            outputs = model(enc_inputs, dec_inputs, None)
        
        loss = criterion(outputs, target_)

        loss.backward()
        optimizer.step()

        total_loss += loss
        iter_num += 1
        with torch.no_grad():
            tr_acc = acc(outputs, target_, gpt_pad_token)
        train_acc += tr_acc

        if step % 2 == 0:
            total_train_loss.append(total_loss.data.cpu().numpy() / iter_num)
            iteration_list.append(iteration)
            iteration += 1
        
        # test_time_visual(args, enc_inputs, outputs, target_, bert_tokenizer, gpt_vocab)

    return total_loss.data.cpu().numpy() / iter_num, train_acc.data.cpu().numpy() / iter_num
Example #3
def test_target_content():
    if utils.IMGFAC_URL.find("localhost") >= 0 and os.path.isfile(
            utils.IMGFAC_TCXML) and os.path.isfile(utils.IMGFAC_CONF):
        for target_imageid, target_imagestatus in target_built.itervalues():
            if target_imagestatus == 'COMPLETE':
                imagejson = utils.get_target(target_imageid)
                yield _assert_target_content_installed, imagejson[
                    'target'], target_imageid
    else:
        print "Skipping target images inspection: imgfac is not running locally? target_content.xml missing? imagefactory.conf misplaced?"
Example #4
    def forward(self, data):
        mse = nn.MSELoss(reduction='mean').to(self.device)
        l1norm = nn.L1Loss(reduction='mean').to(self.device)

        real_targets = utils.get_target(True, self.inverted,
                                        data.real_x_predictions.size(),
                                        self.device)
        synthesis_targets = utils.get_target(
            False, self.inverted, data.synthesis_x_predictions.size(),
            self.device)

        return mse(data.real_x_predictions, real_targets) +\
            mse(data.synthesis_x_predictions, synthesis_targets) +\
            mse(data.real_y_predictions, real_targets) +\
            mse(data.synthesis_y_predictions, synthesis_targets) +\
            self.regularizer * l1norm(data.id_x_approximations,
                                      data.real_x) +\
            self.regularizer * l1norm(data.id_y_approximations,
                                      data.real_y)
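
Note: a minimal sketch of a get_target consistent with the calls above — real predictions get a tensor of ones, synthesized predictions get zeros, and the inverted flag swaps the two. This is an assumption inferred from the usage here, not the repository's actual implementation.

import torch

def get_target(is_real, inverted, size, device):
    # Hypothetical helper: build an LSGAN-style label tensor of the given size.
    # `inverted` flips the label (used when the discriminator loss is being maximized).
    label_is_one = (is_real != inverted)
    fill_value = 1.0 if label_is_one else 0.0
    return torch.full(size, fill_value, device=device)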
Example #5
def main():
    df, allVisitorID = preproc.read_data()
    x = collect_features(df)
    y = utils.get_target(df, allVisitorID)  # Dict: fullVisitorID to target
    pdb.set_trace()

    scaler = StandardScaler()
    x = scaler.fit_transform(x)
    clustering.apply_kmeans(x, x)
    clustering.apply_hierarchical(x, x)
    """TODO:
Example #6
def test_target_content():
    if (
        utils.IMGFAC_URL.find("localhost") >= 0
        and os.path.isfile(utils.IMGFAC_TCXML)
        and os.path.isfile(utils.IMGFAC_CONF)
    ):
        for target_imageid, target_imagestatus in target_built.itervalues():
            if target_imagestatus == "COMPLETE":
                imagejson = utils.get_target(target_imageid)
                yield _assert_target_content_installed, imagejson["target"], target_imageid
    else:
        print "Skipping target images inspection: imgfac is not running locally? target_content.xml missing? imagefactory.conf misplaced?"
Example #7
def send_json():
    _handle_args(request.args)
    uptime = datetime.datetime.now().replace(microsecond=0) - startTime
    return jsonify({
        'Release': MY_RELEASE,
        'StartTime': startTimeStr,
        'Uptime': str(uptime),
        'Hostname': utils.get_hostname(),
        'LocalAddress': utils.get_local_address(),
        'RemoteAddress': request.remote_addr,
        'ServerHit': str(utils.get_server_hit_count()),
        'WorkerInstance': utils.get_worker_instance(),
        'Target': utils.get_target(),
    })
Example #8
def valid(model, iterator, optimizer, criterion, args, bert_tok):
    total_loss = 0
    iter_num = 0
    test_acc = 0
    model.eval()

    if args.useKey == 'True':
        keyword, refine_idx = keyword_loader(args, 'valid', bert_tok)

    with torch.no_grad():
        for step, batch in enumerate(iterator):
            enc_inputs = batch.que

            copy_dec_inputs = copy.deepcopy(batch.ans)
            copy_dec_target = copy.deepcopy(batch.ans)

            dec_inputs = get_dec_inputs(copy_dec_inputs, gpt_pad_token,
                                        gpt_eos_token)
            target_ = get_target(copy_dec_target, gpt_pad_token)
            target_ = target_.view(-1)

            segment_ids, valid_len = get_segment_ids_vaild_len(
                enc_inputs, pad_token_idx)
            attention_mask = gen_attention_mask(enc_inputs, valid_len)

            if args.useKey == 'True':
                outputs = model(enc_inputs, dec_inputs, segment_ids,
                                attention_mask, keyword[step],
                                refine_idx[step])
            else:
                outputs = model(enc_inputs, dec_inputs, segment_ids,
                                attention_mask, None, refine_idx[step])

            loss = criterion(outputs, target_)

            total_loss += loss
            iter_num += 1
            te_acc = acc(outputs, target_, gpt_pad_token)

            test_time_visual(args, enc_inputs, outputs, target_,
                             bert_tokenizer, gpt_vocab)
            test_acc += te_acc

        return total_loss.data.cpu().numpy() / iter_num, \
            test_acc.data.cpu().numpy() / iter_num
Example #9
def show_details():
    _handle_args(request.args)
    uptime = datetime.datetime.now().replace(microsecond=0) - startTime
    return "<html>" + \
           "<head><title>Demo Application</title></head>" + \
           "<body>" + \
           "<table>" + \
           "<tr><td> Release </td> <td>" + MY_RELEASE + "</td> </tr>" + \
           "<tr><td> Start Time </td> <td>" + startTimeStr + "</td> </tr>" + \
           "<tr><td> Up Time </td> <td>" + str(uptime) + "</td> </tr>" + \
           "<tr><td> Hostname </td> <td>" + utils.get_hostname() + "</td> </tr>" + \
           "<tr><td> Local Address </td> <td>" + utils.get_local_address() + "</td> </tr>" + \
           "<tr><td> Remote Address </td> <td>" + request.remote_addr + "</td> </tr>" + \
           "<tr><td> Server Hit </td> <td>" + str(utils.get_server_hit_count()) + "</td> </tr>" + \
           "<tr><td> Worker Instance </td> <td>" + utils.get_worker_instance() + "</td> </tr>" + \
           "<tr><td> Target </td> <td>" + utils.get_target() + "</td> </tr>" + \
           "</table>" + \
           "</body>" + \
           "</html>"
Example #10
def test(model, gpt_model, iterator, optimizer, criterion, args):
    total_loss = 0
    iter_num = 0
    test_acc = 0
    model.eval()
    gpt_model.eval()
    
    if args.useKey == 'True':
        keyword = keyword_loader(args, 'test')

    with torch.no_grad():
        for step, batch in enumerate(iterator):
            enc_inputs = batch.que

            copy_dec_inputs = copy.deepcopy(batch.ans)
            copy_dec_target = copy.deepcopy(batch.ans)

            dec_inputs = get_dec_inputs(copy_dec_inputs, gpt_pad_token, gpt_eos_token)
            target_ = get_target(copy_dec_target, gpt_pad_token)
            target_ = target_.view(-1)

            with torch.no_grad():
                dec_inputs = gpt_model(dec_inputs)

            #segment_ids, valid_len = get_segment_ids_vaild_len(enc_inputs, pad_token_idx)
            #attention_mask = gen_attention_mask(enc_inputs, valid_len)
            
            if args.useKey == 'True':
                outputs = model(enc_inputs, dec_inputs, keyword[step])
            else:
                outputs = model(enc_inputs, dec_inputs, None)

            loss = criterion(outputs, target_)

            total_loss += loss
            iter_num += 1
            te_acc = acc(outputs, target_, gpt_pad_token)

            test_acc += te_acc

        return total_loss.data.cpu().numpy() / iter_num, test_acc.data.cpu().numpy() / iter_num
Example #11
 def test_get_target(self):
     with patch.dict(os.environ, {'TARGET': 'foo'}):
         self.assertEqual('foo', utils.get_target())
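
Note: the test above fixes the contract for this particular get_target — it should surface the TARGET environment variable. A minimal sketch satisfying the test; the fallback for an unset variable is an assumption, not the original code.

import os

def get_target():
    # Hypothetical implementation: return the TARGET env var, empty string if unset.
    return os.environ.get('TARGET', '')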
Example #12
def train():

    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--batch_size', type=int)
    parser.add_argument('--n_epochs', default=2, type=int)
    args = parser.parse_args()

    batch_size = args.batch_size
    n_epochs = args.n_epochs

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.manual_seed(7)
    n_residual_blocks = 9
    use_dropout = False
    path_train_data_x = 'trainA'
    path_train_data_y = 'trainB'
    path_model_xy = f'cyclegan_gen_AB_{n_epochs}_{batch_size}'
    path_model_yx = f'cyclegan_gen_BA_{n_epochs}_{batch_size}'
    # learning_rate = .002
    regularizer = 10
    n_discriminator_steps = 1
    image_size = (256, 256)

    discr_x = Discriminator()
    discr_x = discr_x.to(device)
    discr_y = Discriminator()
    discr_y = discr_y.to(device)
    gen_xy = Generator(n_residual_blocks, use_dropout)
    gen_xy = gen_xy.to(device)
    gen_yx = Generator(n_residual_blocks, use_dropout)
    gen_yx = gen_yx.to(device)
    discr_x.apply(utils.init_weights_gaussian)
    discr_y.apply(utils.init_weights_gaussian)
    gen_xy.apply(utils.init_weights_gaussian)
    gen_yx.apply(utils.init_weights_gaussian)

    optimizer_discr_x = optim.Adam(discr_x.parameters())
    optimizer_discr_y = optim.Adam(discr_y.parameters())
    optimizer_gen_xy = optim.Adam(gen_xy.parameters())
    optimizer_gen_yx = optim.Adam(gen_yx.parameters())

    scheduler_discr_x = utils.HingeScheduler(optimizer_discr_x, .0001,
                                             n_epochs / 2, n_epochs / 2)
    scheduler_discr_y = utils.HingeScheduler(optimizer_discr_y, .0001,
                                             n_epochs / 2, n_epochs / 2)
    scheduler_gen_xy = utils.HingeScheduler(optimizer_gen_xy, .0002,
                                            n_epochs / 2, n_epochs / 2)
    scheduler_gen_yx = utils.HingeScheduler(optimizer_gen_yx, .0002,
                                            n_epochs / 2, n_epochs / 2)

    transform = utils.get_transform(image_size)
    train_data_x = torchvision.datasets.ImageFolder(
        root=path_train_data_x, transform=transform)
    train_data_x_loader = torch.utils.data.DataLoader(
        train_data_x, batch_size=batch_size, shuffle=True, num_workers=4)
    train_data_y = torchvision.datasets.ImageFolder(
        root=path_train_data_y, transform=transform)
    train_data_y_loader = torch.utils.data.DataLoader(
        train_data_y, batch_size=batch_size, shuffle=True, num_workers=4)

    synthesis_x_pool = utils.HistoricPool(50)
    synthesis_y_pool = utils.HistoricPool(50)

    mse = nn.MSELoss(reduction='mean').to(device)
    loss_function = CycleGANLoss(regularizer, False, device)

    for epoch_index in range(n_epochs):
        for _ in range(n_discriminator_steps):
            utils.switch_cycle_gradient_requirements(
                discr_x, discr_y, gen_xy, gen_yx, True)
            optimizer_discr_x.zero_grad()
            optimizer_discr_y.zero_grad()
            scheduler_discr_x.step()
            scheduler_discr_y.step()

            cycle_data = utils.get_cycle_data(
                train_data_x_loader, train_data_y_loader, discr_x, discr_y,
                gen_xy, gen_yx, device)

            synthesis_x_pool.update(cycle_data.synthesis_x)
            synthesis_y_pool.update(cycle_data.synthesis_y)
            synthesis_x_batch = synthesis_x_pool.get_batch()
            synthesis_y_batch = synthesis_y_pool.get_batch()

            # Maximizing loss function - hence inverting labels.
            size = cycle_data.real_x_predictions.size()
            real_targets = utils.get_target(True, True, size, device)
            synthesis_targets = utils.get_target(False, True, size, device)

            loss_x = mse(cycle_data.real_x_predictions, real_targets) +\
                mse(discr_x(synthesis_x_batch), synthesis_targets)
            loss_x.backward()
            optimizer_discr_x.step()

            loss_y = mse(cycle_data.real_y_predictions, real_targets) +\
                mse(discr_y(synthesis_y_batch), synthesis_targets)
            loss_y.backward()
            optimizer_discr_y.step()

        utils.switch_cycle_gradient_requirements(
            discr_x, discr_y, gen_xy, gen_yx, False)

        optimizer_gen_xy.zero_grad()
        optimizer_gen_yx.zero_grad()
        scheduler_gen_xy.step()
        scheduler_gen_yx.step()
        cycle_data = utils.get_cycle_data(
            train_data_x_loader, train_data_y_loader, discr_x, discr_y, gen_xy,
            gen_yx, device)
        loss = loss_function(cycle_data)

        loss.backward()
        optimizer_gen_xy.step()
        optimizer_gen_yx.step()
        print(epoch_index)

    torch.save(gen_xy.state_dict(), path_model_xy)
    torch.save(gen_yx.state_dict(), path_model_yx)
Example #13
        gt_points = gt_points.cuda()

        pointsReconstructed = model(pose_points, identity_points)

        rec_loss = torch.mean((pointsReconstructed - gt_points)**2)

        edg_loss = 0

        for i in range(len(random_sample)):

            f = new_face[i].cpu().numpy()
            v = identity_points[i].transpose(0, 1).cpu().numpy()
            edg_loss = edg_loss + utils.compute_score(
                pointsReconstructed[i].unsqueeze(0), f,
                utils.get_target(v, f, 1))

        edg_loss = edg_loss / len(random_sample)

        l2_loss = rec_loss
        rec_loss = rec_loss + 0.0005 * edg_loss
        rec_loss.backward()
        optimizer_G.step()
        total_loss = total_loss + rec_loss

    print('####################################')
    print(epoch)
    print(time.time() - start)
    mean_loss = total_loss / (j + 1)
    print('mean_loss', mean_loss.item())
    print('####################################')
Example #14
import streamlit as st
from plot_func import general_plot, local_plot
from utils import get_dataframe, get_shap_values, get_target, get_model_name, model_fit, get_row_number

st.set_option('deprecation.showfileUploaderEncoding', False)

st.markdown("<h1 style='text-align: center; '> Avec la Lab, attrapez les tous !</h1>",
            unsafe_allow_html=True)
data = get_dataframe()

target = get_target(data)

model_name = get_model_name()

model = model_fit(model_name, data, target)


st.info(" Shap is working hard! Please wait")

shap_values = get_shap_values(model_name, model, data)

st.info(" Thank you")

fig_general = general_plot(shap_values)
st.plotly_chart(fig_general, sharing='streamlit')

row = get_row_number()

fig_local = local_plot(int(row), shap_values)
st.plotly_chart(fig_local, sharing='streamlit')
Example #15
 def Reserve(self):
     self.open_page2("https://jayurocc.com/Reservation/Reservation")
     self.close_popup_class("pointBtn")
     self.GoToReservation(utils.get_today(), utils.get_target(21))
Example #16
def artcoder(STYLE_IMG_PATH,
             CONTENT_IMG_PATH,
             CODE_PATH,
             OUTPUT_DIR,
             LEARNING_RATE=0.01,
             CONTENT_WEIGHT=1e8,
             STYLE_WEIGHT=1e15,
             CODE_WEIGHT=1e15,
             MODULE_SIZE=16,
             MODULE_NUM=37,
             EPOCHS=50000,
             Dis_b=80,
             Dis_w=180,
             Correct_b=50,
             Correct_w=200,
             USE_ACTIVATION_MECHANISM=True):
    # STYLE_IMG_PATH = './style/redwave4.jpg'
    # CONTENT_IMG_PATH = './content/boy.jpg'
    # CODE_PATH = './code/boy.jpg'
    # OUTPUT_DIR = './output/'
    utils.del_file(OUTPUT_DIR)
    IMAGE_SIZE = MODULE_SIZE * MODULE_NUM

    transform = transforms.Compose([
        transforms.Resize(IMAGE_SIZE),
        transforms.ToTensor(),
    ])

    vgg = Vgg16(requires_grad=False).cuda()  # vgg16 model
    ss_layer = SSlayer(requires_grad=False).cuda()

    style_img = utils.load_image(filename=STYLE_IMG_PATH, size=IMAGE_SIZE)
    content_img = utils.load_image(filename=CONTENT_IMG_PATH, size=IMAGE_SIZE)
    code_img = utils.load_image(filename=CODE_PATH, size=IMAGE_SIZE)
    init_img = utils.add_pattern(content_img, code_img)

    style_img = transform(style_img)
    content_img = transform(content_img)
    init_img = transform(init_img)

    init_img = init_img.repeat(1, 1, 1, 1).cuda()
    style_img = style_img.repeat(1, 1, 1, 1).cuda()  # make fake batch
    content_img = content_img.repeat(1, 1, 1, 1).cuda()

    features_style = vgg(style_img)  # feature maps extracted from VGG
    features_content = vgg(content_img)

    gram_style = [utils.gram_matrix(i)
                  for i in features_style]  # gram matrix of style feature
    mse_loss = nn.MSELoss()

    # y is the target output; optimization starts from the content image (with the code pattern overlaid).
    y = init_img.detach()
    y = y.requires_grad_()  # mark y as requiring gradients

    optimizer = optim.Adam([y], lr=LEARNING_RATE)  # the optimizer updates the tensor y directly

    error_matrix, ideal_result = utils.get_action_matrix(
        img_target=utils.tensor_to_PIL(y),
        img_code=code_img,
        Dis_b=Dis_b,
        Dis_w=Dis_w)
    code_target = ss_layer(
        utils.get_target(ideal_result, b_robust=Correct_b, w_robust=Correct_w))

    print(" Start training =============================================")
    for epoch in range(EPOCHS):

        def closure(code_target=code_target):

            optimizer.zero_grad()
            y.data.clamp_(0, 1)
            features_y = vgg(y)  # feature maps of y extracted from VGG
            gram_style_y = [
                utils.gram_matrix(i) for i in features_y
            ]  # Gram matrices of features_y at relu1_2, 2_2, 3_3, 4_3

            fc = features_content.relu3_3  # content target at relu3_3
            fy = features_y.relu3_3  # y at relu3_3

            style_loss = 0  # add style_losses in relu1_2,2_2,3_3,4_3
            for i in [0, 1, 2, 3]:
                style_loss += mse_loss(gram_style_y[i], gram_style[i])
            style_loss = STYLE_WEIGHT * style_loss

            code_y = ss_layer(y)

            if USE_ACTIVATION_MECHANISM == 1:
                error_matrix, ideal_result = utils.get_action_matrix(
                    img_target=utils.tensor_to_PIL(y),
                    img_code=code_img,
                    Dis_b=Dis_b,
                    Dis_w=Dis_w)
                activate_num = np.sum(error_matrix)
                activate_weight = torch.tensor(error_matrix.astype('float32'))
                code_y = code_y.cpu() * activate_weight
                code_target = code_target.cpu() * activate_weight
            else:
                code_y = code_y.cpu()
                code_target = code_target.cpu()
                activate_num = MODULE_NUM * MODULE_NUM

            code_loss = CODE_WEIGHT * mse_loss(code_target.cuda(),
                                               code_y.cuda())
            content_loss = CONTENT_WEIGHT * mse_loss(fc, fy)  # content loss

            # tv_loss = TV_WEIGHT * (torch.sum(torch.abs(y[:, :, :, :-1] - y[:, :, :, 1:])) +
            #                        torch.sum(torch.abs(y[:, :, :-1, :] - y[:, :, 1:, :])))

            total_loss = style_loss + code_loss + content_loss
            total_loss.backward(retain_graph=True)

            if epoch % 20 == 0:
                print(
                    "Epoch {}: Style Loss : {:4f}. Content Loss: {:4f}. Code Loss: {:4f}. Activated module number: {:4.2f}. Discriminate_b:{:4.2f}. Discriminate_w:{:4.2f}."
                    .format(epoch, style_loss, content_loss, code_loss,
                            activate_num, Dis_b, Dis_w))
            if epoch % 200 == 0:
                img_name = 'epoch=' + str(epoch) + '__Wstyle=' + str(
                    "%.1e" % STYLE_WEIGHT) + '__Wcode=' + str(
                        "%.1e" % CODE_WEIGHT) + '__Wcontent' + str(
                            "%.1e" % CONTENT_WEIGHT) + '.jpg'
                utils.save_image_epoch(y,
                                       OUTPUT_DIR,
                                       img_name,
                                       code_img,
                                       addpattern=True)
                print('Save output: ' + img_name)

            # Return the loss on every call so optimizer.step(closure) receives it,
            # not only on epochs where an image is saved.
            return total_loss

        optimizer.step(closure)
Example #17
 def Reserve(self):
     print("Reserve")
     self.GoToReservation(utils.get_today(), utils.get_target(21))