Example #1
def main(args):
    dataset = EMNIST()
    iniciales = np.arange(10)
    Xinit, Yinit = dataset.load_segment_of_data(iniciales, "train")
    Xinit, Yinit = shuffle(Xinit, Yinit, random_state=args.seed)

    #train(Xinit, one_hot(Yinit, 10), args, n_classes=10, exp=1, split=0, n_epochs=25)

    Xtest, Ytest = dataset.load_segment_of_data(iniciales, "test")
    Xtest = np.where(Xtest >= 0.5, 1.0, 0.0)  # binarize
    eval_str, _ = eval(Xtest,
                       one_hot(Ytest, 10),
                       args,
                       n_classes=10,
                       exp=1,
                       split=0)
    # Fit the OpenMax novelty detector on the training data
    omax = get_train_fit(Xinit,
                         one_hot(Yinit, 10),
                         args,
                         n_classes=10,
                         exp=1,
                         split=0)
    # Load one held-out class and count its novel detections
    Xc, Yc = dataset.load_segment_of_data([10], kind="train")
    get_novel_detections(Xc, Yc, omax, args, n_classes=11, exp=1, split=0)
    print("Done!")
Example #2
def main(args):
    if args.dataset == "MNIST":
        dataset = MNIST()
    elif args.dataset == "CIFAR10":
        dataset = CIFAR10()
    elif args.dataset == "EMNIST":
        dataset = EMNIST()
    else:
        raise NotImplementedError("Not yet available")

    with open("res_exp3.txt", "w") as f:
        for exp in np.arange(1, args.n_exps + 1):
            f.write("\n-- Experimento {}/{} --\n".format(exp, args.n_exps))
            n_epochs = args.n_epochs
            # Directories for saving results
            create_dir(args.save_path + "exp_{}/".format(exp))
            img_dir = args.save_path + "imgs/img_{}/".format(exp)
            create_dir(img_dir)
            #np.random.seed(args.seed)
            # indexed classes
            #real_classes = np.arange(dataset.n_classes)
            if args.ordered:
                shuffled_classes = np.append(
                    np.arange(args.starting),
                    np.random.permutation(
                        np.arange(args.starting,
                                  dataset.n_classes)))  # keep the initial classes in order
            else:
                shuffled_classes = np.random.permutation(dataset.n_classes)
            initial_classes = shuffled_classes[:args.starting]
            print("Clases Iniciales: " +
                  ",".join([CARACTERES[ch] for ch in initial_classes]))
            unknown_classes = shuffled_classes[args.starting:]

            # Load the initial classes
            Xinit, Yinit = dataset.load_segment_of_data(
                initial_classes, "train")
            Yinit = change_indexes(
                Yinit, initial_classes)  # remap labels to indices 0..starting-1
            Xinit, Yinit = shuffle(Xinit, Yinit, random_state=args.seed)
            #Xinit = np.where(Xinit >= 0.5, 1.0, 0.0)  # binarize

            train(Xinit,
                  one_hot(Yinit, args.starting),
                  args,
                  n_classes=args.starting,
                  exp=exp,
                  split=0,
                  n_epochs=n_epochs)

            # Evaluate the initial performance
            Xitest, Yitest = dataset.load_segment_of_data(
                initial_classes, "test")
            Yitest = change_indexes(Yitest, initial_classes)
            Xitest = np.where(Xitest >= 0.5, 1.0, 0.0)  # binarize
            eval_str, _ = eval(Xitest,
                               one_hot(Yitest, args.starting),
                               args,
                               n_classes=args.starting,
                               exp=exp,
                               split=0)
            f.write("--" + eval_str)
            check_dist_space(Xitest,
                             one_hot(Yitest, args.starting),
                             args,
                             n_classes=args.starting,
                             exp=exp,
                             split=0)

            # Create our OpenMax instance
            try:
                omax = get_train_fit(Xinit,
                                     one_hot(Yinit, args.starting),
                                     args,
                                     n_classes=args.starting,
                                     exp=exp,
                                     split=0)
                if omax is None:
                    print("Muy pocos casos")
                    f.write("\nno se pudo ajustar mas\n---END---\n")
                    continue
            except ValueError:
                continue

            for idx, unk in enumerate(unknown_classes):
                print("Recognizing " + CARACTERES[unk])
                n_epochs = (5 + int(args.n_epochs * (0.9 ** idx))
                            if args.decay_epoch else int(args.n_epochs / 2))
                initial_classes = shuffled_classes[:args.starting + idx]
                Xitest, Yitest = dataset.load_segment_of_data(initial_classes,
                                                              kind="test")
                Xc, Yc = dataset.load_segment_of_data([unk], kind="train")

                Xnew = np.concatenate([Xitest, Xc], 0)
                Ynew = np.concatenate([Yitest, Yc], 0)
                nclasses = args.starting + idx + 1
                Ynew = change_indexes(Ynew, shuffled_classes[:nclasses])
                #Xnew = np.where(Xnew >= 0.5, 1.0, 0.0)  # binarize

                is_unks, Xunk, Yunk, res_text = get_novel_detections(
                    Xnew,
                    Ynew,
                    omax,
                    args,
                    n_classes=nclasses,
                    exp=exp,
                    split=idx)
                # Check whether any novel cases were detected:
                f.write(res_text)
                if not is_unks:
                    print("---END---")
                    f.write("---END---\n")
                    break

                # Generate data
                Xgen, Ygen, gen_text = generate(args,
                                                n_classes=nclasses - 1,
                                                exp=exp,
                                                split=idx,
                                                n_generados=int(0.9 *
                                                                len(Yunk)))
                imagen_grande(Xgen, 10,
                              img_dir + "recuerdo{:02d}.png".format(idx))
                # Merge them with the new data
                Xinit = np.concatenate([Xgen, Xunk], axis=0)
                Yinit = np.concatenate([Ygen, Yunk], axis=0)
                Xinit, Yinit = shuffle(Xinit, Yinit, random_state=args.seed)
                #Xinit = np.where(Xinit >= 0.5, 1.0, 0.0)  # binarize

                # Train
                train(Xinit,
                      one_hot(Yinit, nclasses),
                      args,
                      n_classes=nclasses,
                      increment=True,
                      exp=exp,
                      split=idx + 1,
                      n_epochs=n_epochs)

                # Evaluate
                Xitest, Yitest = dataset.load_segment_of_data(
                    shuffled_classes[:nclasses], "test")
                Yitest = change_indexes(Yitest, shuffled_classes[:nclasses])
                #Xitest = np.where(Xitest >= 0.5, 1.0, 0.0)  # binarize
                eval_str, acc = eval(Xitest,
                                     one_hot(Yitest, nclasses),
                                     args,
                                     n_classes=nclasses,
                                     exp=exp,
                                     split=idx + 1)
                f.write(CARACTERES[unk] + "-" + eval_str)

                # Catastrophic forgetting check
                olvido = (1 / nclasses) + 0.05
                if acc <= olvido:
                    forget_text = "\nAccuracy {:.3f} <= {:.3f}.\n---Catastrophic Forgetting---\n".format(
                        100 * acc, 100 * olvido)
                    print(forget_text)
                    f.write(forget_text)
                    break

                check_dist_space(Xitest,
                                 one_hot(Yitest, nclasses),
                                 args,
                                 n_classes=nclasses,
                                 exp=exp,
                                 split=idx + 1)
                # And fit a new OpenMax detector
                omax = get_train_fit(Xinit,
                                     one_hot(Yinit, nclasses),
                                     args,
                                     n_classes=nclasses,
                                     exp=exp,
                                     split=idx + 1)
                if omax is None:
                    print("Muy pocos casos")
                    f.write("\nno se pudo ajustar mas\n---END---\n")
                    break
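
Example #2 depends on change_indexes to keep labels contiguous after shuffling the class order. A minimal sketch of the assumed behavior (not the project's actual implementation): map each original class id to its position in the given ordering, so labels always run from 0 to n-1.

import numpy as np

def change_indexes(Y, classes):
    # Hypothetical sketch: remap the original class ids in Y to their
    # index within `classes`, e.g. classes=[7, 2, 9] sends label 2 to 1.
    mapping = {int(c): i for i, c in enumerate(classes)}
    return np.array([mapping[int(y)] for y in Y])
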
Example #3
def RNG(self):
    # Infinite generator of (z, y) pairs: z drawn uniformly from
    # [-sqrt(3), sqrt(3)] (zero mean, unit variance), y a random
    # one-hot class vector.
    while True:
        z = np.random.uniform(-np.sqrt(3), np.sqrt(3), self.z_size)
        y = np.random.randint(0, self.n_classes)
        y = one_hot(y, self.n_classes).squeeze()
        yield (z, y)
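
RNG is an infinite (z, y) sampler. A minimal TF1-style sketch of how such a generator could feed a tf.data pipeline, in the spirit of the session-based code in Example #5; the Sampler class and all sizes below are assumptions for illustration, not the project's actual wiring.

import numpy as np
import tensorflow as tf

class Sampler:
    # Hypothetical host class for the RNG method shown above.
    def __init__(self, z_size=100, n_classes=10):  # assumed sizes
        self.z_size, self.n_classes = z_size, n_classes

    def RNG(self):
        while True:
            z = np.random.uniform(-np.sqrt(3), np.sqrt(3), self.z_size)
            y = np.eye(self.n_classes, dtype=np.float32)[np.random.randint(0, self.n_classes)]
            yield (z, y)

sampler = Sampler()
dataset = tf.data.Dataset.from_generator(
    sampler.RNG,
    output_types=(tf.float32, tf.float32),
    output_shapes=([sampler.z_size], [sampler.n_classes]))
z_batch, y_batch = dataset.batch(64).make_one_shot_iterator().get_next()
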
Example #4
File: exp01.py  Project: dmelladoc/SIGANN
def main(args):
    # Load the selected dataset
    if args.dataset == "MNIST":
        dataset = MNIST()
    elif args.dataset == "CIFAR10":
        dataset = CIFAR10()
    elif args.dataset == "EMNIST":
        dataset = EMNIST()
    else:
        raise NotImplementedError("Not yet available")

    assert dataset.n_classes > args.segments, "There must be fewer segments than classes"
    f = open("resultados.txt", "w")

    for exp in np.arange(1, args.n_exps + 1):
        f.write("\n-- Experimento {}/{} --\n".format(exp, args.n_exps))
        # print("-- Experimento {}/{} --".format(exp, args.n_exps))
        create_dir(args.save_path + "exp_{}/".format(exp))
        create_dir(
            args.monitor +
            "exp_{}/".format(exp))  # Creamos los directorios de guardado
        create_dir("gen_img/exp_{}/".format(exp))

        np.random.seed(args.seed)

        # Select the data segments
        splits = np.array_split(np.arange(dataset.n_classes), args.segments)
        for split_idx, split in enumerate(splits):
            f.write("Split {}: ".format(split_idx + 1) + str(split) + "\n")
            # print("Split {}".format(split_idx + 1), split)
            split_classes = split.max() + 1  # number of classes at this incremental stage

            # If this is the first stage:
            if split_idx == 0:
                train_epochs = args.n_epochs
                is_increment = False
                X_in, Y_in = dataset.load_segment_of_data(split, "train")
            else:
                train_epochs = int(0.75 * train_epochs)  # fewer epochs at each later stage
                is_increment = True
                _X_in, _Y_in = dataset.load_segment_of_data(split, "train")
                if args.rehearsal:
                    X_r, Y_r = dataset.load_percent_of_data(
                        np.concatenate(splits[:split_idx], axis=0),
                        args.percentage, "train")
                    X_in = np.concatenate([_X_in, X_r, X_pseudo], axis=0)
                    Y_in = np.concatenate([_Y_in, Y_r, Y_pseudo], axis=0)
                else:
                    X_in = np.concatenate([_X_in, X_pseudo], axis=0)
                    Y_in = np.concatenate([_Y_in, Y_pseudo], axis=0)

            X_in, Y_in = shuffle(X_in, Y_in, random_state=args.seed)
            X_in = np.where(X_in >= 0.5, 1.0, 0.0)  # binarize

            Y_in = one_hot(Y_in, split_classes)
            train(X_in,
                  Y_in,
                  args,
                  increment=is_increment,
                  n_classes=split_classes,
                  exp=exp,
                  split=split_idx,
                  n_epochs=train_epochs)
            # Evaluate accuracy

            X_test, Y_test = dataset.load_segment_of_data(
                np.arange(split_classes), "test")
            X_test = np.where(X_test >= 0.5, 1.0, 0.0)  # binarize
            Y_test = one_hot(Y_test, split_classes)

            eval_str = eval(X_test,
                            Y_test,
                            args,
                            n_classes=split_classes,
                            exp=exp,
                            split=split_idx)
            f.write(eval_str)

            X_pseudo, Y_pseudo, gen_str = generate(X_test,
                                                   Y_test,
                                                   args,
                                                   n_classes=split_classes,
                                                   exp=exp,
                                                   split=split_idx,
                                                   n_generados=args.n_generate)
            f.write(gen_str)

            gen_img_file = "gen_img/exp_{}/generado_e{:02d}_s{:02d}.png".format(
                exp, exp, split_idx + 1)
            imagen_grande(X_pseudo[:100], n=10, out_filename=gen_img_file)

    f.close()
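
imagen_grande writes a tiled grid of samples to disk. A minimal sketch of the assumed behavior for [N, H, W, 1] float images in [0, 1] (the project's actual implementation may differ):

import numpy as np
import imageio

def imagen_grande(X, n, out_filename):
    # Hypothetical sketch: tile the first n*n images into an n-by-n grid
    # and save the result as a single 8-bit PNG.
    h, w = X.shape[1], X.shape[2]
    grid = X[:n * n].reshape(n, n, h, w, -1)
    grid = grid.transpose(0, 2, 1, 3, 4).reshape(n * h, n * w, -1)
    imageio.imwrite(out_filename, (255 * grid.squeeze()).astype(np.uint8))

This signature matches both call sites above: imagen_grande(Xgen, 10, path) in Example #2 and imagen_grande(X_pseudo[:100], n=10, out_filename=...) in Example #4.
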
Example #5
def main(args):
    dataset = EMNIST()

    with open("res_exp2.txt", "w") as f:
        for exp in np.arange(1, args.n_exps + 1):  # one validation run for now
            f.write("\n-- Experiment {}/{} --\n".format(exp, args.n_exps))

            # Directories for saving results
            create_dir(args.save_path + "exp_{}/".format(exp))

            np.random.seed(args.seed)
            # Train the network on the initial digit data
            Xtrain, Ytrain = dataset.load_segment_of_data(np.arange(10), "train")
            Xtrain, Ytrain = shuffle(Xtrain, Ytrain, random_state=args.seed)
            Ytrain = one_hot(Ytrain, 10)

            train(Xtrain, Ytrain, args, n_classes=10, exp=exp, n_epochs=args.n_epochs)
            # Evaluate model performance
            Xtest, Ytest = dataset.load_segment_of_data(np.arange(10), "test")
            Ytest = one_hot(Ytest, 10)
            eval_str = eval(Xtest, Ytest, args, n_classes=10, exp=exp)
            f.write(eval_str)
            f.write("Letra;SoftmaxAcc;OpenMaxAcc;Deteccion;Errores\n")

            omax = get_train_fit(Xtrain, Ytrain, args, n_classes=10, exp=exp)
            save_file = args.save_path + "exp_{}/model_split{}.ckpt".format(exp, 0)

            Xtest, Ytest = dataset.load_segment_of_data(np.arange(10), "test")
            with tf.Graph().as_default():
                dset = InputGenerator([None,28,28,1], 10, args.z_size, batch_size=args.batch_size, n_epochs=1)
                aae = AAE("test", batch_size=args.batch_size, z_size=args.z_size, n_epochs=1, n_classes=10)
                iterador = dset.create_test_generator()
                x_input = iterador.get_next()
                _, y_tilde = aae.encoder(x_input, supervised=True)

                saver = tf.train.Saver()
                with tf.Session() as sess:
                    saver.restore(sess, tf.train.latest_checkpoint(args.save_path + "exp_{}/".format(exp)))

                    # Treat each letter class in turn as the unknown class
                    for i in np.arange(10, dataset.n_classes):
                        Xc, Yc = dataset.load_segment_of_data([i], kind="train")
                        Xchar = np.concatenate([Xtest, Xc], 0)
                        Ychar = np.concatenate([Ytest, Yc], 0)
                        Ychar[Ychar > 9] = 10  # collapse every novel class into label 10 ("unknown")

                        sess.run(iterador.initializer, {dset.x_input:Xchar})
                        eval_logits = np.empty((0, 10), dtype=np.float32)  # accumulate logits batch by batch
                        try:
                            while True:
                                logit = sess.run(y_tilde)
                                eval_logits = np.append(eval_logits, logit, axis=0)
                        except tf.errors.OutOfRangeError:
                            pass

                        openmax = omax.evaluate(eval_logits, Ychar)
                        softmax = omax._softmax(eval_logits)

                        Ypred = np.where(openmax.max(-1) <= args.threshold, 10, openmax.argmax(-1))
                        sm_acc = accuracy_score(Ychar, softmax.argmax(-1))
                        om_acc = accuracy_score(Ychar, Ypred)

                        # Fraction of true unknowns flagged as unknown (denominator: novel samples)
                        detect = len(np.intersect1d(
                            np.argwhere(Ypred == 10).squeeze(),
                            np.argwhere(Ychar == 10).squeeze())) / len(Yc)
                        # Known samples wrongly flagged as unknown, same denominator
                        mistakes = len(np.intersect1d(
                            np.argwhere(Ypred == 10).squeeze(),
                            np.argwhere(Ychar < 10).squeeze())) / len(Yc)
                        res_text = "{};{:2.3f};{:2.3f};{:2.3f};{:2.3f}\n".format(CARACTERES[i], 100*sm_acc, 100*om_acc, 100*detect, 100*mistakes)
                        print(res_text)
                        f.write(res_text)