Example #1
def main():
    calAndSaveFeatures()
    starttime = time()

    if len(sys.argv) <= 1:
        print("Missing arguments")
    else:
        fileName = sys.argv[1]
        arguments = {"show" : True, "save" : False, "saveInfo" : False}
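        # Override the defaults with command-line flags of the form --show=True / --save=True / --saveInfo=True.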
        for i in range(2, len(sys.argv)):
            if (sys.argv[i].split("=")[0])[2:] in arguments.keys():
                arguments[(sys.argv[i].split("=")[0])[2:]] = \
                    (sys.argv[i].split("=")[1] == str(True))

        print("loading model...")
        clf = loadModel()
        detector = Detector(clf)

        print("detecting...")

        detector.detectFace(fileName, _show=arguments['show'], _save=arguments['save'],
                            _saveInfo=arguments['saveInfo'])

        endtime = time()
        print("cost: " + str(endtime - starttime))
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--test', action='store_true')
    parser.add_argument('--model')
    args = parser.parse_args()

    if args.train and args.model:
        nn = model.generate_model()
        training_input = data_utils.load_data(training_input_dir)
        training_output = data_utils.load_data(training_output_dir)
        nn.fit(training_input, training_output, batch_size=128, epochs=50)
        model.saveModel(nn, args.model)
        test = input("Do you want to test with the test images too? ")
        if test == 'yes':
            test_input = data_utils.load_data(test_input_dir)
            test_output = nn.predict(test_input)
            print(test_output.shape)
            data_utils.save_images(test_output_dir, test_input_dir,
                                   test_output)
    elif args.test and args.model:
        nn = model.loadModel(args.model)
        test_input = data_utils.load_data(test_input_dir)
        test_output = nn.predict(test_input)
        print(test_output.shape)
        data_utils.save_images(test_output_dir, test_input_dir, test_output)
Example #3
def test_hmm():
    m = model.loadModel("hmm.pkl")
    sentence = wordsToIds("what is your name")[1:-1]
    print(idsToWords(sentence))
    output = m.getOutput(sentence)
    print(idsToWords(output))

    loader = loadDataset(set_name="test")

    with open("data/testdata/result.txt", "a") as result_file:
        c = 0
        for sample in loader:
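            # Only samples past index 3767 are decoded and written; this appears to resume an earlier, partially completed run.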
            if c > 3767:
                input_sentence = sample["input"][0][1:-1]
                #target_sentence = sample["target"][0]

                output = idsToWords(m.getOutput(input_sentence))

                try:
                    last_index = output.index("<EOS>")
                    line = " ".join(output[:last_index]) + "\n"
                except ValueError:  # no <EOS> token in the output
                    line = " ".join(output) + "\n"
                result_file.write(line)

            print(c, end="\r")
            c += 1
Example #4
    def __init__(self, Parent=None):
        '''
        Constructor
        '''
        super().__init__(Parent)

        self.__InitData()  # Initialize the data first, then initialize the UI
        self.__InitView()
        self.svmModel = model.loadModel()
Example #5
def performIndividualTest(inputSentence, weightsFilename):
    tokenizer, wordsToIndex = loadWordMapping()
    sequenceLength = 300
    vecSpaceSize = 8

    #Load the model and its weights
    model = loadModel(len(wordsToIndex) + 1, sequenceLength, vecSpaceSize)
    model.load_weights(weightsFilename)

    X = tokenizer.texts_to_sequences([inputSentence])
    X = pad_sequences(X, maxlen=sequenceLength, padding='post')
    result = model.predict(X)
    print("\tPredicted: {}".format(result))
Example #6
def test(inputFileName, imgSize=64):
    # load model
    model = loadModel('model_{0}.hd5'.format(imgSize))

    # load input data
    x_test = loadTestData(inputFileName, imgSize=imgSize)

    # predict
    pred = model.predict(x_test)
    print(pred)

    # show result
    print('Prediction : {0}'.format(pred[0]))
    num = np.argmax(pred[0])
    print('Prediction : {0}'.format(num))
Example #7
    def __init__(self):
        super(Example, self).__init__()
        self.model = model.loadModel()
        # resize sets the width and height; move sets the window position
        self.resize(280, 280)
        self.move(100, 100)
        self.setWindowTitle("test")

        # Keep setMouseTracking False, otherwise mouse-move events are tracked even when no button is pressed
        self.setMouseTracking(False)

        '''
            To keep the trajectory drawn while the mouse button is held down visible on the widget,
            we need a list that stores every point the cursor has passed through.
        '''
        self.pos_xy = []
Example #8
def main():
    parser = argparse.ArgumentParser(
        description=
        'This code will train and test with a new model or loaded one given the desired type of architecture'
    )
    parser.add_argument('--train',
                        action='store_true',
                        help='bool determining whether to train or not')
    parser.add_argument('--test',
                        action='store_true',
                        help='bool determining whether to test or not')
    parser.add_argument(
        '--model',
        help=
        'model name, to be of the form \'srcnn_v2\' with model type and version'
    )
    parser.add_argument('--epochs',
                        help='number of epochs to train with',
                        type=int,
                        default=20)
    parser.add_argument('--batch',
                        help='batch size to calculate loss',
                        type=int,
                        default=64)
    parser.add_argument('--lr',
                        help='model learning rate',
                        type=float,
                        default=0.001)
    parser.add_argument('--validation',
                        help='validation split for training data',
                        type=float,
                        default=0.1)
    args = parser.parse_args()
    checkValid(args.model)
    checkCurrDirectory()

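    # Train a new model (optionally testing it afterwards) or load an existing one and test it.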
    if args.train and args.model:
        nn = train(args.model, args.epochs, args.batch, args.lr,
                   args.validation)
        test_or_not = input("Do you want to test with the test images too? ")
        if test_or_not == 'yes':
            test(nn, args.model + '/')
    elif args.test and args.model:
        nn = model.loadModel(models_path + args.model + '.h5')
        test(nn, args.model + '/')
Example #9
def performBatchTest(weightsFilename):
    tokenizer, wordsToIndex = loadWordMapping()
    datasetDirectory = "aclImdb/test/"
    sequenceLength = 300
    vecSpaceSize = 8

    print("Evaluating model performance...")
    #Get the testing dataset
    reviews, ratings = readDataset(datasetDirectory, sequenceLength,
                                   vecSpaceSize)

    #Load the model and its weights
    model = loadModel(len(wordsToIndex) + 1, sequenceLength, vecSpaceSize)
    model.load_weights(weightsFilename)

    X = tokenizer.texts_to_sequences(reviews)
    X = pad_sequences(X, maxlen=sequenceLength, padding='post')
    loss, accuracy = model.evaluate(X, ratings)
    print("\tLoss: {}".format(loss))
    print("\tAccuracy: {}".format(accuracy))
Example #10
def main(status):

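    # 'generate' -> 'train' -> 'evaluate': each stage chains into the next by calling main() again.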
    if status == 'generate':
        generateDatasets(dataset_name, n_train, n_valid, n_test)
        main('train')
    elif status == 'train':
        model_name = md.train(dataset_name, batch_size=512, debug=False)
        main('evaluate')
        pass
    elif status == 'evaluate':
        model = md.loadModel(model_path)
        dataset = ds.loadDataset(dataset_path + dataset_name + '/' + test_file)
        for path, y in zip(dataset['path'], dataset['y']):
            img = utils.loadImage(path)
            p = model.predict(img[None, :, :, :], batch_size=1)
            print(p, y)
            p = p[0]
            y_pred = [[p[0], p[1]], [p[2], p[3]], [p[4], p[5]], [p[6], p[7]]]
            img_dbg = drawPoints(img, y_pred, colors)
            utils.showImage(img_dbg)
    pass
Example #11
    values = line.split("\t")
    line_dict = line2base(targets, values)
    baseline.append(line_dict)

predicted = baseline
tr_pred = baseline
baseline = np.array(baseline)
predicted = np.array(predicted)
tr_pred = np.array(tr_pred)

fh.close()

# Load model, predict targets and exit
if args.load_model is not None:
    sys.stderr.write("Loading model from: %s\n" % (args.load_model))
    m = model.loadModel(args.load_model)
    res = m.predict(data_X)
    for r in res:
        print(r)
    sys.exit()

data_X = np.array(data_X)
data_Y = np.array(data_Y)

# Model cross validation
m = model.Model(model_type, args.model_params, f_select, args.feat_selector_params, sparse=sparse)
pred = data_Y

sys.stderr.write("Starting crossvalidation\n")
# cv = cross_validation.StratifiedKFold(data_X, n_folds=10, shuffle=True, random_state=seed)
# scores = cross_validation.cross_val_score(m, data_X, data_Y, cv=10)
Example #12
    line = line.rstrip("\n")
    values = line.split("\t")
    line_dict = line2base(targets, values)
    baseline.append(line_dict)

predicted = baseline
baseline = np.array(baseline)
predicted = np.array(predicted)
probas = predicted

fh.close()

# Load model, predict targets and exit
if args.load_model is not None:
    sys.stderr.write("Loading model from: %s\n" % (args.load_model))
    m = model.loadModel(args.load_model)
    res = m.predict(data_X)
    for r in res:
        print(r)
    sys.exit()

data_X = np.array(data_X)
data_Y = np.array(data_Y)

# Model cross validation
m = model.Model(model_type,
                args.model_params,
                f_select,
                args.feat_selector_params,
                sparse=sparse)
pred = data_Y
Example #13
parser.add_argument('data_file', metavar='test_data', type=str)
parser.add_argument('verbose',
                    metavar='verbose',
                    nargs='?',
                    type=int,
                    default=0)
args = parser.parse_args()

verbose = args.verbose
features_ignore = ["old_node_id", "wrong_form_1", "wrong_form_2"]

fh = gzip.open(args.data_file, 'rt', encoding="UTF-8")
line = fh.readline().rstrip("\n")
feature_names = line.split("\t")

m = model.loadModel(args.model_file)
targets = m.get_classes()[0].keys()

base_targets = [x.replace("new", "old") for x in targets]

# Read the data
test_X = []
test_Y = []
base_Y = []
while True:
    line = fh.readline().rstrip("\n")
    if not line:
        break
    feat_values = line.split("\t")

    feat_row = dict()
Example #14
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((TCP_HOST, TCP_PORT))
    s.listen(True)

    while True:

        conn, addr = s.accept()
        data = getImage.getImgData(s, conn)

        # Show image for testing, time ~ 5 seconds
        # _show(data)

        status = processing(data, recipient, predictor)

        if status == 0:
            print(status)
        else:
            print('error')


if __name__ == "__main__":
    ip = sys.argv[1]  # IP address of Server
    recipient = sys.argv[2]  # Phone number of Recipient
    # Load model
    path = os.path.join('/home', getpass.getuser(),
                        'findFire/server/models/model_final.pth')
    cuda = False
    predictor = model.loadModel(path, cuda)
    # Listen for incoming images
    getData(ip, recipient, predictor)
Example #15
(options, args) = parser.parse_args()
movie = options.movie

if movie == '' or movie is None:
    print('Error. Usage: python index.py --movie "<movie>"')
    sys.exit()

try:
    numberOfTweets = 1
    tweets = []
    ratingValue = 0

    if model.isModelExists():
        print("Loading Classifier Model...")
        vectorizer, classifier = model.loadModel()
    else:
        print("Building Classifier Model...")
        # Start building models
        vectorizer, classifier = model.buildModel()

    print("Retrieving Tweets...")
    tso = TwitterSearchOrder()
    tso.set_keywords([movie])
    tso.set_language('en')
    tso.set_include_entities(False)

    ts = TwitterSearch(
        consumer_key='kbibzVdoRoKOwd3dlxZCobum5',
        consumer_secret='qEz32mJANlQ5hbFGacxmMfO2Pmyexs3WgPFeGq4QzA88qAOKe8',
        access_token='1348353366-ofrMAMNiFfz102VY9c3MXdTrsAD2c4Dq91QiWVD',
Example #16
    loadedVectors = np.load("embeddings.npy", mmap_mode="r")
    with open("embeddings.vocab", "r", encoding="utf8") as fileRead:
        for index, word in enumerate(fileRead):
            wordToVecMap[word.strip()] = loadedVectors[index]
            
    return wordToVecMap"""

datasetDirectory = "aclImdb/train/"
sequenceLength = 300
vecSpaceSize = 8

reviews, ratings = readDataset(datasetDirectory, sequenceLength, vecSpaceSize)
#Get embedded matrix representing vocabulary
wordsToIndex, tokenizer = generateWordMapping(reviews)
#Generate model and output summary
model = loadModel(len(wordsToIndex) + 1, sequenceLength, vecSpaceSize)
model.summary()
#Define weights checkpoint
filepath = "data/weights-{epoch:d}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, mode='min')

#Train the model
X = tokenizer.texts_to_sequences(reviews)
X = pad_sequences(X, maxlen=sequenceLength, padding='post')

#print(encodedReviews.shape)
model.fit(X,
          ratings,
          epochs=10,
          batch_size=32,
          shuffle=True,
Example #17
def trainModel(n_iters=100000,
               teacher_forcing_ratio=0.,
               print_every=1000,
               plot_every=100,
               learning_rate=0.01,
               max_length=MAX_LENGTH):

    training_pairs, vocab_size, word2ix, ix2word = loadDataset()
    encoder, decoder = loadModel(vocab_size)

    print("Training the model ... ")
    start = time.time()
    plot_losses = []
    print_loss_total = 0  # reset every print_every
    plot_loss_total = 0  # reset every plot_every

    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    criterion = nn.NLLLoss()

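    # Main training loop: one training pair per iteration, with periodic console logging and loss plotting.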
    for iter in range(1, n_iters + 1):
        training_pair = training_pairs[iter - 1]
        input_variable = training_pair['input']
        target_variable = training_pair['target']

        input_variable = Variable(torch.LongTensor(input_variable).view(-1, 1))
        target_variable = Variable(
            torch.LongTensor(target_variable).view(-1, 1))
        if USE_CUDA:
            input_variable = input_variable.cuda()
            target_variable = target_variable.cuda()

        print(input_variable)

        loss = trainIter(input_variable,
                         target_variable,
                         encoder,
                         decoder,
                         encoder_optimizer,
                         decoder_optimizer,
                         criterion,
                         max_length=max_length,
                         teacher_forcing_ratio=teacher_forcing_ratio)
        print_loss_total += loss
        plot_loss_total += loss

        # Keeping track of average loss and printing results on screen
        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' %
                  (utils.timeSince(start, iter / n_iters), iter,
                   iter / n_iters * 100, print_loss_avg))

        # Keeping track of average loss and plotting in figure
        if iter % plot_every == 0:
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg)

            if min(plot_losses) == plot_loss_avg:
                #we save this version of the model
                torch.save(encoder.state_dict(), "encoder.ckpt")
                torch.save(decoder.state_dict(), "decoder.ckpt")

            plot_loss_total = 0

    utils.showPlot(plot_losses)
Example #18
def predictNN(X):
    #------------------------------------------ Load RNN Model --------------------------------------------#
    print("Loading the Neural Network parameters.")
    BiRNNmodel = loadModel("BiRNN")
    predictedBiRNN = BiRNNmodel.predict([X,X], batch_size=10)
    return predictedBiRNN
Example #19
# Parse command line arguments
parser = argparse.ArgumentParser(description="Test scikit-learn model accuracy.")
parser.add_argument('model_file', metavar='model_file', type=str)
parser.add_argument('data_file', metavar='test_data', type=str)
parser.add_argument('verbose', metavar='verbose', nargs='?', type=int, default=0)
args = parser.parse_args()

verbose = args.verbose
features_ignore = [ "old_node_id", "wrong_form_1", "wrong_form_2" ]

fh = gzip.open(args.data_file, 'rt', encoding="UTF-8")
line = fh.readline().rstrip("\n")
feature_names = line.split("\t")

m = model.loadModel(args.model_file)
targets = m.get_classes()[0].keys()

base_targets = [x.replace("new", "old") for x in targets]

# Read the data
test_X = []
test_Y = []
base_Y = []
while True:
    line = fh.readline().rstrip("\n")
    if not line:
        break
    feat_values = line.split("\t")

    feat_row = dict()

Example #20
def clearLists():
    data['links'].clear()
    data['category'].clear()
    data['date'].clear()
    data['image'].clear()
    data['headline'].clear()
    data['article'].clear()
    data['summary'].clear()
    data['sentiment'].clear()


def scraperScheduler():
    getLinks()
    getArticleData()
    storeArticles()
    clearLists()


def startScheduler():
    scheduler = BlockingScheduler()
    scheduler.add_job(scraperScheduler, 'interval', hours=0.5)
    scheduler.start()


if __name__ == '__main__':
    loadModel()
    scraperScheduler()
    startScheduler()
Example #21
def preparePredict(imgSize=64):
    # load model
    model = loadModel('model_{0}.hd5'.format(imgSize))

    return model
Example #22
    ### Rest of Predictions : Predicted_2 ###
    for i in range(2,X.shape[0]-1):
        input_data = np.vstack(([x3[i-2],y3[i-2]],[x3[i-1],y3[i-1]],[x3[i],y3[i]])).reshape(1,3,2)
        predicted_output = predictNN(input_data)
        x3.append(predicted_output[0][0])
        y3.append(predicted_output[0][1])
    ### Plot Predicted Trajectory ###
    plt.plot(x2, y2, linewidth=1.0, color='green', linestyle='--', marker='*', label='Predicted_1')
    plt.plot(x3, y3, linewidth=2.0, color='red', linestyle='--', marker='v', label='Predicted_2')
    plt.legend()
    plt.savefig("./plots/Projectile_Plot_%s.png" % (y+1))
    plt.clf()

#------------------------------------------ Load RNN Model --------------------------------------------#
print("Loading the Neural Network parameters.")
BiRNNmodel = loadModel("BiRNN")

#------------------------------------------Testing Model on Data------------------------------------------------#

#Import the raw data
print('Reading the data file...')
df = np.loadtxt("./data/projectiles.csv", delimiter=',')
projCount, idx = 0, 0
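# Each trajectory begins at a row whose first value is 0 (presumably t = 0); collect its rows until the next trajectory starts.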
while idx<df.shape[0]:
    motion = df[idx].reshape(1,3)
    idx = idx+1
    while df[idx][0] != 0:
        motion = np.append(motion, df[idx].reshape(1,3), axis=0)
        idx = idx+1
        if idx==df.shape[0]:
            break
Example #23
    global guild, guildID, channel, channelID
    print('We have logged in as {0.user}'.format(client))
    guild = client.get_guild(guildID)
    talkchannels = [client.get_channel(channelID)]

    # limit = int(re.findall(r'limit=(\d+)', " ".join(sys.argv))[0])

    # channelIDs = re.findall(r' (\d+) ', " ".join(sys.argv))


@client.event
async def on_message(message):
    if message.author == client.user:
        return

    if message.channel not in talkchannels:
        return
    
    response = getBotResponse(message.content)

    await message.channel.send(response)


model.loadModel()


with open('token.txt', 'r') as tokentxt:
    # asyncio.get_event_loop().create_task(pingean())
    client.run(tokentxt.read())

Example #24
def main(config):
    if not (os.path.exists(rawAudioPath) and os.path.exists(metadataPathTrain) and os.path.exists(metadataPathTest)):
        print("The audio data and csv metadata must be located in the following paths:\n"
              f"1. {rawAudioPath}\n2. {metadataPathTrain}\n3. {metadataPathTest}")
        sys.exit()

    config = parseArgs(config)
    setSeed(config.randomSeed)
    logs = {"epoch": [], "iter": [], "saveStep": config.saveStep, "loggingStep": config.loggingStep}

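    # If a checkpoint path is given and contains saved state, resume its config and logs and reload the optimizer.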
    loadOptimizer = False
    if config.pathCheckpoint is not None:
        cdata = getCheckpointData(config.pathCheckpoint)
        if cdata is not None:
            data, logs, locArgs = cdata
            print(f"Checkpoint detected at {data}")
            loadArgs(config, locArgs, forbiddenAttr={"nGPU", "pathCheckpoint", "maxChunksInMem", "chunkSize"})
            config.load, loadOptimizer = [data], True
            config.loadCriterion = True

    print(f'CONFIG: \n{json.dumps(vars(config), indent=4, sort_keys=True)}')
    print('-' * 50)

    useGPU = torch.cuda.is_available()

    metadata_dir = f'data/musicnet_metadata_train_transcription_{config.labelsBy}_trainsplit.csv' if \
                           config.transcriptionWindow is not None \
                           else f'data/musicnet_metadata_train_{config.labelsBy}_trainsplit.csv'

    if not os.path.exists(metadata_dir):
        # if config.transcriptionWindow is not None:
        #    musicNetMetadataTrain = pd.read_csv('data/musicnet_metadata_transcript_train_alldata.csv')
        # else:
        musicNetMetadataTrain = pd.read_csv('data/musicnet_metadata_train.csv', index_col = 'id')
        try:
            if config.transcriptionWindow is not None:
               metadataTrain, metadataVal = train_test_split(musicNetMetadataTrain, test_size=0.1)
                                                          # stratify=musicNetMetadataTrain[config.labelsBy])
            else:
               metadataTrain, metadataVal = train_test_split(musicNetMetadataTrain, test_size=0.1,
                                                             stratify=musicNetMetadataTrain[config.labelsBy])
            print(metadataTrain.shape, metadataVal.shape)

        except ValueError:
            for col, count in zip(musicNetMetadataTrain[config.labelsBy].value_counts().index,
                                  musicNetMetadataTrain[config.labelsBy].value_counts().values):
                if count == 1:
                    subDF = musicNetMetadataTrain.loc[musicNetMetadataTrain[config.labelsBy] == col]
                    musicNetMetadataTrain = musicNetMetadataTrain.append(subDF)
            metadataTrain, metadataVal = train_test_split(musicNetMetadataTrain, test_size=0.1,
                                                          stratify=musicNetMetadataTrain[config.labelsBy])
        if config.transcriptionWindow is not None:
           musicNetMetadataTranscript = pd.read_csv('data/musicnet_metadata_transcript_train_alldata.csv')
           metadataTrain = musicNetMetadataTranscript[musicNetMetadataTranscript['id'].isin(metadataTrain.index)]
           metadataVal = musicNetMetadataTranscript[musicNetMetadataTranscript['id'].isin(metadataVal.index)]
           metadataTrain.to_csv(f'data/musicnet_metadata_train_transcription_{config.labelsBy}_trainsplit.csv')
           metadataVal.to_csv(f'data/musicnet_metadata_train_transcription_{config.labelsBy}_valsplit.csv')
        else:
           metadataTrain.to_csv(f'data/musicnet_metadata_train_{config.labelsBy}_trainsplit.csv')
           metadataVal.to_csv(f'data/musicnet_metadata_train_{config.labelsBy}_valsplit.csv')
    else:
        if config.transcriptionWindow is not None:
           metadataTrain = pd.read_csv(f'data/musicnet_metadata_train_transcription_{config.labelsBy}_trainsplit.csv')
           metadataVal = pd.read_csv(f'data/musicnet_metadata_train_transcription_{config.labelsBy}_valsplit.csv')
        else:
           metadataTrain = pd.read_csv(f'data/musicnet_metadata_train_{config.labelsBy}_trainsplit.csv', index_col='id')
           metadataVal = pd.read_csv(f'data/musicnet_metadata_train_{config.labelsBy}_valsplit.csv', index_col='id')

    print("Loading the training dataset")
    trainDataset = AudioBatchData(rawAudioPath=rawAudioPath,
                                  metadata=metadataTrain,
                                  sizeWindow=config.sizeWindow,
                                  labelsBy=config.labelsBy,
                                  outputPath='data/musicnet_lousy/train_data/train',
                                  CHUNK_SIZE=config.chunkSize,
                                  NUM_CHUNKS_INMEM=config.maxChunksInMem,
                                  useGPU=useGPU,
                                  transcript_window=config.transcriptionWindow)
    print("Training dataset loaded")
    print("")

    print("Loading the validation dataset")
    valDataset = AudioBatchData(rawAudioPath=rawAudioPath,
                                metadata=metadataVal,
                                sizeWindow=config.sizeWindow,
                                labelsBy=config.labelsBy,
                                outputPath='data/musicnet_lousy/train_data/val',
                                CHUNK_SIZE=config.chunkSize,
                                NUM_CHUNKS_INMEM=config.maxChunksInMem,
                                useGPU=False,
                                transcript_window=config.transcriptionWindow)
    print("Validation dataset loaded")
    print("")

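    # Either restore a pretrained CPC model from config.load or build a fresh encoder plus autoregressive network.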
    if config.load is not None:
        cpcModel, config.hiddenGar, config.hiddenEncoder = loadModel(config.load, config)
    else:
        # Encoder network
        encoderNet = CPCEncoder(config.hiddenEncoder, 'layerNorm', sincNet=config.encoderType == 'sinc')
        # AR Network
        arNet = getAR(config)

        cpcModel = CPCModel(encoderNet, arNet)

    batchSize = config.batchSize
    cpcModel.supervised = config.supervised

    # Training criterion
    if config.load is not None and config.loadCriterion:
        cpcCriterion = loadCriterion(config.load[0], cpcModel.gEncoder.DOWNSAMPLING,
                                     len(metadataTrain[config.labelsBy].unique()))
    else:
        cpcCriterion = getCriterion(config, cpcModel.gEncoder.DOWNSAMPLING,
                                    len(metadataTrain[config.labelsBy].unique())) # change for transcription labels

    if loadOptimizer:
        stateDict = torch.load(config.load[0], 'cpu')
        cpcCriterion.load_state_dict(stateDict["cpcCriterion"])

    if useGPU:
        cpcCriterion.cuda()
        cpcModel.cuda()

    # Optimizer
    gParams = list(cpcCriterion.parameters()) + list(cpcModel.parameters())
    lr = config.learningRate
    optimizer = torch.optim.Adam(gParams, lr=lr, betas=(config.beta1, config.beta2), eps=config.epsilon)

    if loadOptimizer:
        print("Loading optimizer " + config.load[0])
        state_dict = torch.load(config.load[0], 'cpu')
        if "optimizer" in state_dict:
            optimizer.load_state_dict(state_dict["optimizer"])

    # Checkpoint
    expDescription = f'{config.samplingType}_'
    if config.samplingType == 'samecategory':
        expDescription += f'{config.labelsBy}_'

    pathCheckpoint = f'logs/{expDescription}{datetime.now().strftime("%d-%m_%H-%M-%S")}'
    os.makedirs(pathCheckpoint, exist_ok=True)
    pathCheckpoint = os.path.join(pathCheckpoint, "checkpoint")

    scheduler = None
    if config.schedulerStep > 0:
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                    config.schedulerStep,
                                                    gamma=0.5)
    if config.schedulerRamp is not None:
        n_epoch = config.schedulerRamp
        print(f"Ramp activated. n_e = {n_epoch}")
        scheduler_ramp = torch.optim.lr_scheduler.LambdaLR(optimizer,
                                                           lr_lambda=lambda epoch: rampSchedulingFunction(n_epoch,
                                                                                                          epoch),
                                                           last_epoch=-1)
        if scheduler is None:
            scheduler = scheduler_ramp
        else:
            scheduler = SchedulerCombiner([scheduler_ramp, scheduler], [0, config.schedulerRamp])
    if scheduler is not None:
        for i in range(len(logs["epoch"])):
            scheduler.step()

    experiment = None
    if config.log2Board:
        comet_ml.init(project_name="jtm", workspace="tiagocuervo")
        if not os.path.exists('.comet.config'):
            cometKey = input("Please enter your Comet.ml API key: ")
            experiment = comet_ml.Experiment(cometKey)
            cometConfigFile = open(".comet.config", "w")
            cometConfigFile.write(f"[comet]\napi_key={cometKey}")
            cometConfigFile.close()
        else:
            experiment = comet_ml.Experiment()
        experiment.log_parameters(vars(config))

    run(trainDataset, valDataset, batchSize, config.samplingType, cpcModel, cpcCriterion, config.nEpoch, optimizer,
        scheduler, pathCheckpoint, logs, useGPU, log2Board=config.log2Board, experiment=experiment)