abb_losses = []  #keeps track of loss values for ABB words
training_losses = []  #keeps track of loss on training data each run
make_ABB = lambda x: x[:1] + [1.0] + x[2:]  #Sets the semantic feature (index 1) to +1.0, marking an ABB word
make_ABA = lambda x: x[:1] + [-1.0] + x[2:]  #Sets the semantic feature (index 1) to -1.0, marking an ABA word
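#e.g. make_ABB([0.5, 0.0, -0.5]) returns [0.5, 1.0, -0.5]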

for rep in range(REPS):
    print("Rep: " + str(rep))

    #Erase the previous model:
    keras.backend.clear_session()
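    #(clear_session resets Keras's global state, so no weights carry over between reps)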

    #Build the new model:
    model = Seq2Seq.seq2seq(input_dim=FEAT_NUM,
                            hidden_dim=FEAT_NUM,
                            output_length=2,
                            output_dim=FEAT_NUM,
                            batch_size=1,
                            learn_rate=0.001,
                            layer_type="lstm",
                            dropout=DROPOUT)
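    #output_length=2 means the decoder emits two FEAT_NUM-dimensional vectors,
    #presumably one per syllable of the reduplicated output.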

    #PRETRAINING
    if VOCAB_SIZE > 0:
        print("Simulating real-life experience of infants...Rep=" + str(rep))
        irl_X = []
        irl_Y = []
        for word in range(VOCAB_SIZE):
            #Putting reduplication in training:
            if np.random.rand() < REDUP_IN_PT:
                syll_alpha = choice(all_sylls)
                template = choice(["ABB", "ABA"])
                if template == "ABB":
                    #Hypothetical completion of this truncated branch,
                    #mirroring the syll + syll reduplication pattern used
                    #elsewhere in these examples:
                    irl_X.append(make_ABB(syll_alpha))
                    irl_Y.append(syll_alpha + syll_alpha)
Example #2

    #Generate data for the chosen scope of novelty. The head of this
    #branch is missing here; "novel_feat_data" is a hypothetical name,
    #mirroring the two functions called below:
    if SCOPE == "feature":
        feat_num, withheld_syll, syllables = novel_feat_data()
    elif SCOPE == "segment":
        feat_num, withheld_syll, syllables = novel_seg_data()
    elif SCOPE == "syllable":
        feat_num, withheld_syll, syllables = novel_syll_data()
    else:
        raise Exception(
            "Wrong scope! Must be from the set {feature, segment, syllable}.")

    X = np.array(syllables)
    Y = np.array([syll + syll for syll in syllables])
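    #Each training pair maps a syllable to that same syllable doubled,
    #i.e. total reduplication.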

    #Build the model:
    model = Seq2Seq.seq2seq(input_dim=feat_num,
                            hidden_dim=feat_num * 3,
                            output_length=Y.shape[1],
                            output_dim=Y.shape[2],
                            batch_size=1,
                            learn_rate=0.001,
                            layer_type="lstm",
                            dropout=DROPOUT)
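    #The output shape is read off of Y itself: Y.shape[1] timesteps of
    #Y.shape[2] features each.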

    #Train the model:
    hist = model.train(X, Y, epoch_num=EPOCHS, print_every=10)
    learning_curves.append(hist["Loss"])

    #Test the model on trained data:
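    #(np.tile with reps=(1, 1, 1) just adds leading axes, turning one
    #example into a batch of one)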
    trained_IN = np.tile(X[0], (1, 1, 1))
    trained_OUT = np.tile(Y[0], (1, 1, 1))
    train_pred = model.predict(trained_IN)

    #Test the model on withheld data:
    withheld_IN = np.tile(np.array(withheld_syll), (1, 1, 1))
Example #3
        #Shuffle training data:
        indexes = list(range(len(ordered_X)))
        shuffle(indexes)
        X = np.array([ordered_X[i] for i in indexes])
        Y = np.array([ordered_Y[i] for i in indexes])
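        #Using a single shuffled index list keeps each X row aligned
        #with its Y row.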

        #Create the model object:
        #model = Seq2Seq(input_dim=FEAT_NUM, hidden_dim=FEAT_NUM*3, output_length=3, output_dim=FEAT_NUM, depth=2)

        #Build the new model:
        model = Seq2Seq.seq2seq(
            input_dim=FEAT_NUM,
            hidden_dim=FEAT_NUM * 3,
            output_length=3,
            output_dim=FEAT_NUM,
            batch_size=1,
            learn_rate=0.005,
            layer_type="lstm",
        )
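        #output_length=3 gives three output timesteps, presumably one
        #per syllable of a three-syllable (ABB or ABA) word.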

        this_curve = []
        for ep in range(EPOCHS):
            #Train the model one epoch at a time,
            #so we can give it a forced-choice task at each step:
            print("Epoch: " + str(ep))
            hist = model.train(X, Y, epoch_num=1, print_every=2)
            this_curve.append(hist["Loss"][-1])
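            #Forced-choice evaluation: compare the model's prediction
            #to the correct output via squared error (corr_loss).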
            for trial_type in accuracies.keys():
                corr_loss = np.square(
                    np.subtract(