# Parse CLI arguments; enable CUDA only when requested AND actually available.
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

# NLP pipeline.
# NOTE(review): the 'en' shortcut is deprecated in spaCy v3 (use e.g.
# 'en_core_web_sm') — confirm the spaCy version pinned by this project.
nlp = spacy.load('en')

# Data set roots
training_path = os.path.join(args.root, "training")
validation_path = os.path.join(args.root, "validation")

# CNN Distance learning model.
model = models.CNNCDist(
    window_size=window_size,
    vocab_size=settings.voc_sizes[args.n_gram],
    n_classes=1,
    temporal_division=1,
    out_channels=(args.n_filters, args.n_filters, args.n_filters),
    embedding_dim=args.dim,
    n_linear=args.n_linear
)

# Load trained weights and vocabulary.
# torch.load accepts a path directly; passing the path (instead of an open
# file object that is never closed) avoids leaking the file handles.
model.load_state_dict(torch.load(args.model))
if args.cuda:
    # Single cuda() call after the weights are loaded (the original called
    # it twice; .cuda() is idempotent, so one call suffices).
    model.cuda()
# end if
voc = torch.load(args.voc)
# Example #2 (snippet separator from the scraped source)
# 0
        # Concatenate the two sides along the feature dimension.
        sides = torch.cat((side1, side2), dim=1)

        # Store the concatenated pair as one sample of this batch.
        batch[b] = sides
    # end for

    # Add the finished (inputs, targets) pair to the batch list.
    batches.append((batch, batch_truth))
# end for

# Loss function (regression objective on the distance output).
loss_function = nn.MSELoss()

# CNN distance model.
# NOTE(review): the original comment said "Bi-directional Embedding GRU",
# but the model constructed here is CNNCDist (all defaults).
model = models.CNNCDist()
if args.cuda:
    model.cuda()
# end if
# Snapshot of the best weights seen so far, and its accuracy.
best_model = copy.deepcopy(model.state_dict())
best_acc = 0.0

# Optimizer: plain SGD with momentum; learning rate is hard-coded.
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# For each training epoch (n_epoch is defined elsewhere in the file).
for epoch in range(n_epoch):
    # Running loss / sample counters for this epoch.
    training_loss = 0.0
    training_total = 0.0
# Example #3 (snippet separator from the scraped source)
# 0
# Split the prepared batches into training and validation subsets
# according to settings.validation_ratio (validation taken from the tail).
total_size = len(batches)
validation_size = int(total_size * settings.validation_ratio)
training_size = total_size - validation_size
training_set = batches[:training_size]
validation_set = batches[training_size:]

# Loss function (two-class classification, hence CrossEntropyLoss).
loss_function = nn.CrossEntropyLoss()

# CNN Distance learning
# model = models.CNNCDist(window_size=settings.cnn_window_size, vocab_size=settings.voc_sizes[args.n_gram], n_classes=2)
model = models.CNNCDist(window_size=settings.cnn_window_size,
                        vocab_size=settings.voc_sizes[args.n_gram],
                        n_classes=2,
                        temporal_division=args.temporal_division,
                        out_channels=(args.n_filters, args.n_filters,
                                      args.n_filters))
if args.cuda:
    model.cuda()
# end if
# Snapshot of the best weights seen so far, and its accuracy.
best_model = copy.deepcopy(model.state_dict())
best_acc = 0.0

# Optimizer: plain SGD with momentum; learning rate is hard-coded.
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# For each training epoch (epoch count comes from the CLI).
for epoch in range(args.epoch):
    # Running loss accumulator for this epoch.
    training_loss = 0.0
# Example #4 (snippet separator from the scraped source)
# 0
        # Concatenate the two sides along the feature dimension.
        sides = torch.cat((side1, side2), dim=1)

        # Store the concatenated pair as one sample of this batch.
        batch[b] = sides
    # end for

    # Add the finished (inputs, targets) pair to the batch list.
    batches.append((batch, batch_truth))
# end for

# Loss function (regression objective on the distance output).
loss_function = nn.MSELoss()

# CNN distance model.
# NOTE(review): the original comment said "Bi-directional Embedding GRU",
# but the model constructed here is CNNCDist.
model = models.CNNCDist(window_size=settings.cnn_window_size, vocab_size=settings.voc_sizes[args.n_gram])
if args.cuda:
    model.cuda()
# end if
# Snapshot of the best weights seen so far, and its accuracy.
best_model = copy.deepcopy(model.state_dict())
best_acc = 0.0

# Optimizer: plain SGD with momentum; learning rate is hard-coded.
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# For each training epoch (n_epoch is defined elsewhere in the file).
for epoch in range(n_epoch):
    # Running loss / sample counters for this epoch.
    training_loss = 0.0
    training_total = 0.0