Example #1
0
def main():
    """Register configuration options, start the HTTP server, and run the
    Tornado IO loop (blocks until the loop is stopped)."""
    # Instantiated purely for its side effect of registering option groups.
    Collector(define, 'def_base', 'def_secrets')
    kwargs_collector = Collector(None, 'def_kwargs', update_id=None)

    # Process-wide item used elsewhere to track the last processed update id.
    global item_update_id
    item_update_id = Item(kwargs_collector, 'update_id')

    tornado.options.parse_command_line()
    print("Server listening on port " + str(options.port))
    logging.getLogger().setLevel(logging.DEBUG)

    session_factory = make_session_factory(options.dburl)
    application = Application(session_factory=session_factory)
    server = tornado.httpserver.HTTPServer(application)
    server.listen(options.port)

    io_loop = tornado.ioloop.IOLoop.instance()
    # NOTE(review): a PeriodicCallback polling app.update_tg_bot_message once
    # every 10 seconds was previously sketched here; re-add it via
    # tornado.ioloop.PeriodicCallback if bot polling is needed.
    io_loop.start()
Example #2
0
def plot_representation(model, loader, writer, device, step):
    """Visualize per-layer attention maps for one batch in TensorBoard.

    Runs a single forward pass (the ``break`` stops after the first batch);
    the Collector hooks capture each encoder layer's attention, and one RGB
    image grid per layer is written: red = top-5 attended positions,
    green = padding mask, blue = unused (zeros).

    Fix: removed the ``correct``/``count``/``pred`` accuracy accumulation —
    it was computed but never reported or returned (dead code), and the loop
    exits after the first batch anyway.

    Args:
        model: wrapper exposing ``get_embedding_model()`` and a callable
            forward taking ``input_ids`` / ``attention_mask``.
        loader: iterable of batches shaped like
            ``{"inputs": {"input_ids", "attention_mask"}, "labels"}``.
        writer: TensorBoard ``SummaryWriter``-like object.
        device: torch device the inputs are moved to.
        step: global step recorded with each image.
    """
    collector = Collector()
    collector.collect_representation(model.get_embedding_model())
    collector.collect_attention(model.get_embedding_model())
    with torch.no_grad():
        model.eval()
        for batch in loader:
            input_ids = batch["inputs"]["input_ids"].to(device)
            attention_mask = batch["inputs"]["attention_mask"].to(device)
            # Forward pass only for its side effect: the hooks registered
            # above populate collector.activations.
            model(input_ids=input_ids, attention_mask=attention_mask)
            for i in range(12):  # assumes a 12-layer encoder — TODO confirm
                k = "encoder.%d.mhsa_attn" % i
                t = collector.activations[k]
                # Top-5 attended positions per query row; ``[-1]`` keeps the
                # last dim so the >= comparison broadcasts over it.
                topk, _ = torch.topk(t, 5, dim=3)
                topk_mask = t >= topk[:, :, :, [-1]]
                # NOTE(review): ``mask`` is moved to CPU while ``topk_mask``
                # and ``t`` stay on the activations' device — if the
                # activations live on GPU, the torch.cat below will raise.
                # Confirm the Collector stores CPU tensors.
                mask = (attention_mask.view(
                    -1, 1, 1, attention_mask.shape[1]).expand_as(t).cpu())
                # Flatten (batch, head) into the image-grid batch dimension.
                topk_mask = topk_mask.view(-1, 1, topk_mask.shape[2],
                                           topk_mask.shape[3])
                t = t.view(-1, 1, t.shape[2], t.shape[3])
                mask = mask.reshape(-1, 1, t.shape[2], t.shape[3])
                # Stack as RGB channels: R=top-k hits, G=padding, B=zeros.
                t = torch.cat(
                    (topk_mask.float(), 1 - mask.float(), torch.zeros_like(t)),
                    dim=1)
                writer.add_image("vis/%s" % k,
                                 make_grid(t, nrow=12, pad_value=0.5), step)
            break
        model.train()
    collector.remove_all_hook()
Example #3
0
# Select the lightweight "fast" model variants when configured, then move
# every network onto the configured device.
_device = knobs["device"]
if knobs["fast_models"]:
    encoder, decoder, discriminator = (FastEncoder().to(_device),
                                       FastDecoder().to(_device),
                                       FastDiscriminator().to(_device))
else:
    encoder, decoder, discriminator = (Encoder().to(_device),
                                       Decoder().to(_device),
                                       Discriminator().to(_device))

# One Adam optimizer per network, each with its own learning-rate knob.
opt_encoder = torch.optim.Adam(encoder.parameters(), lr=knobs["lr_encoder"])
opt_decoder = torch.optim.Adam(decoder.parameters(),
                               lr=knobs["lr_decoder"])
opt_discriminator = torch.optim.Adam(
    discriminator.parameters(), lr=knobs["lr_discriminator"])

# Running-statistics collectors, one per logged training metric.
collector_reconstruction_loss = Collector()
collector_wasserstein_penalty = Collector()
collector_fooling_term = Collector()
collector_error_discriminator = Collector()
collector_heuristic_discriminator = Collector()
collector_codes_min = Collector()
collector_codes_max = Collector()
if knobs["resume"]:
    # Resume from the most recently modified checkpoint, reusing its
    # TensorBoard log directory so the curves continue where they left off.
    writer = SummaryWriter(log_dir_last_modified)
    # NOTE(review): despite the "dir" names this is loaded as a single
    # checkpoint file — confirm against config.
    checkpoint_dir = checkpoints_dir_last_modified
    checkpoint = torch.load(checkpoint_dir)
    starting_epoch = checkpoint["epoch"]
    iteration = checkpoint["iteration"]
    encoder.load_state_dict(checkpoint["encoder_state_dict"])
    decoder.load_state_dict(checkpoint["decoder_state_dict"])
    discriminator.load_state_dict(checkpoint["discriminator_state_dict"])
    # NOTE(review): the optimizer state dicts are not restored here, unlike
    # the sibling training scripts that load "opt_*_state_dict" — confirm
    # whether skipping Adam's moment estimates on resume is intentional.
Example #4
0
# Select the lightweight "fast" model variants when configured, then move
# every network onto the configured device.
_device = knobs["device"]
if knobs["fast_models"]:
    encoder, decoder, discriminator = (FastEncoder().to(_device),
                                       FastDecoder().to(_device),
                                       FastDiscriminator().to(_device))
else:
    encoder, decoder, discriminator = (Encoder().to(_device),
                                       Decoder().to(_device),
                                       Discriminator().to(_device))

# One Adam optimizer per network, each with its own learning-rate knob.
opt_encoder = torch.optim.Adam(encoder.parameters(), lr=knobs["lr_encoder"])
opt_decoder = torch.optim.Adam(decoder.parameters(),
                               lr=knobs["lr_decoder"])
opt_discriminator = torch.optim.Adam(
    discriminator.parameters(), lr=knobs["lr_discriminator"])

# Running-statistics collectors, one per logged training metric.
collector_reconstruction_loss = Collector()
collector_imq_mmd = Collector()
collector_fooling_term = Collector()
collector_error_discriminator = Collector()
collector_heuristic_discriminator = Collector()
collector_codes_min = Collector()
collector_codes_max = Collector()
if knobs["resume"]:
    # Resume from the most recently modified checkpoint, reusing its
    # TensorBoard log directory so the curves continue where they left off.
    writer = SummaryWriter(log_dir_last_modified)
    # NOTE(review): despite the "dir" names this is loaded as a single
    # checkpoint file — confirm against config.
    checkpoint_dir = checkpoints_dir_last_modified
    checkpoint = torch.load(checkpoint_dir)
    starting_epoch = checkpoint["epoch"]
    iteration = checkpoint["iteration"]
    encoder.load_state_dict(checkpoint["encoder_state_dict"])
    decoder.load_state_dict(checkpoint["decoder_state_dict"])
    discriminator.load_state_dict(checkpoint["discriminator_state_dict"])
    # NOTE(review): the optimizer state dicts are not restored here, unlike
    # the sibling training scripts that load "opt_*_state_dict" — confirm
    # whether skipping Adam's moment estimates on resume is intentional.
Example #5
0
from preprocessing import get_loader, inv_standardize
from utils import (Collector, reconstruction_loss_func,
                   wasserstein_penalty_func)
from config import (knobs, log_dir_local_time, log_dir_last_modified,
                    checkpoints_dir_local_time, checkpoints_dir_last_modified,
                    interpolations_dir)

loader = get_loader()  # training data loader from the preprocessing module

# Autoencoder networks, moved to the configured device.
encoder = Encoder().to(knobs["device"])
decoder = Decoder().to(knobs["device"])

# One Adam optimizer per network, each with its own learning-rate knob.
opt_encoder = torch.optim.Adam(encoder.parameters(), lr=knobs["lr_encoder"])
opt_decoder = torch.optim.Adam(decoder.parameters(), lr=knobs["lr_decoder"])

# Running-statistics collectors, one per logged training metric.
collector_reconstruction_loss = Collector()
collector_wasserstein_penalty = Collector()
collector_fooling_term = Collector()
collector_codes_min = Collector()
collector_codes_max = Collector()
if knobs["resume"]:
    writer = SummaryWriter(log_dir_last_modified)
    checkpoint_dir = checkpoints_dir_last_modified
    checkpoint = torch.load(checkpoint_dir)
    starting_epoch = checkpoint["epoch"]
    iteration = checkpoint["iteration"]
    encoder.load_state_dict(checkpoint["encoder_state_dict"])
    decoder.load_state_dict(checkpoint["decoder_state_dict"])
    opt_encoder.load_state_dict(checkpoint["opt_encoder_state_dict"])
    opt_decoder.load_state_dict(checkpoint["opt_decoder_state_dict"])
else:
Example #6
0
from models import Encoder, Decoder
from preprocessing import get_loader, inv_standardize
from utils import (Collector, reconstruction_loss_func, imq_mmd_func)
from config import (knobs, log_dir_local_time, log_dir_last_modified,
                    checkpoints_dir_local_time, checkpoints_dir_last_modified,
                    interpolations_dir)

loader = get_loader()  # training data loader from the preprocessing module

# Autoencoder networks, moved to the configured device.
encoder = Encoder().to(knobs["device"])
decoder = Decoder().to(knobs["device"])

# One Adam optimizer per network, each with its own learning-rate knob.
opt_encoder = torch.optim.Adam(encoder.parameters(), lr=knobs["lr_encoder"])
opt_decoder = torch.optim.Adam(decoder.parameters(), lr=knobs["lr_decoder"])

# Running-statistics collectors, one per logged training metric.
collector_reconstruction_loss = Collector()
collector_imq_mmd = Collector()
collector_fooling_term = Collector()
collector_codes_min = Collector()
collector_codes_max = Collector()
if knobs["resume"]:
    writer = SummaryWriter(log_dir_last_modified)
    checkpoint_dir = checkpoints_dir_last_modified
    checkpoint = torch.load(checkpoint_dir)
    starting_epoch = checkpoint["epoch"]
    iteration = checkpoint["iteration"]
    encoder.load_state_dict(checkpoint["encoder_state_dict"])
    decoder.load_state_dict(checkpoint["decoder_state_dict"])
    opt_encoder.load_state_dict(checkpoint["opt_encoder_state_dict"])
    opt_decoder.load_state_dict(checkpoint["opt_decoder_state_dict"])
else: