Example #1
def train(holdout=False, holdout_list=None):
    # avoid the mutable-default-argument pitfall
    holdout_list = holdout_list or []
    # load data
    X, Y = load_X_and_Y()
    x_train, x_dev, x_test = X
    y_train, y_dev, y_test = Y

    # holdout training and dev data if requested
    if holdout:
        X_descr = load_X_descr()
        x_train_all_descr, x_dev_all_descr, _ = X_descr

        holdout_x_train = []
        holdout_y_train = []
        for idx in range(len(x_train)):
            if x_train_all_descr[idx] not in holdout_list:
                holdout_x_train.append(x_train[idx])
                holdout_y_train.append(y_train[idx])

        holdout_x_dev = []
        holdout_y_dev = []
        for idx in range(len(x_dev)):
            if x_dev_all_descr[idx] not in holdout_list:
                holdout_x_dev.append(x_dev[idx])
                holdout_y_dev.append(y_dev[idx])
        
        x_train = np.array(holdout_x_train).reshape((-1, 224, 224, 3))
        y_train = np.array(holdout_y_train).reshape((-1, 1))

        x_dev = np.array(holdout_x_dev).reshape((-1, 224, 224, 3))
        y_dev = np.array(holdout_y_dev).reshape((-1, 1))
    
    # train model
    model = CNN()
    model.fit(x_train, y_train, x_dev, y_dev, save=True)
    model.evaluate(x_test, y_test)
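
The two filtering loops above do the same index-based selection twice; a behavior-equivalent sketch using zip (same names as in the example, nothing new assumed):

def filter_by_descr(xs, ys, descrs, holdout_list):
    # keep the (x, y) pairs whose description is not in holdout_list
    kept = [(x, y) for x, y, d in zip(xs, ys, descrs) if d not in holdout_list]
    return [x for x, _ in kept], [y for _, y in kept]

# holdout_x_train, holdout_y_train = filter_by_descr(
#     x_train, y_train, x_train_all_descr, holdout_list)
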
Example #2
def main():
    train_dataset = h5py.File('datasets/train_signs.h5', "r")
    X_train_orig = np.array(
        train_dataset["train_set_x"][:])  # your train set features
    Y_train_orig = np.array(
        train_dataset["train_set_y"][:])  # your train set labels

    test_dataset = h5py.File('datasets/test_signs.h5', "r")
    X_test_orig = np.array(
        test_dataset["test_set_x"][:])  # your test set features
    Y_test_orig = np.array(
        test_dataset["test_set_y"][:])  # your test set labels

    classes = np.array(test_dataset["list_classes"][:])  # the list of classes

    Y_train_orig = Y_train_orig.reshape((1, Y_train_orig.shape[0]))
    Y_test_orig = Y_test_orig.reshape((1, Y_test_orig.shape[0]))

    X_train = X_train_orig / 255.
    X_test = X_test_orig / 255.
    Y_train = convert_to_one_hot(Y_train_orig, 6).T
    Y_test = convert_to_one_hot(Y_test_orig, 6).T
    print("number of training examples = " + str(X_train.shape[0]))
    print("number of test examples = " + str(X_test.shape[0]))
    print("X_train shape: " + str(X_train.shape))
    print("Y_train shape: " + str(Y_train.shape))
    print("X_test shape: " + str(X_test.shape))
    print("Y_test shape: " + str(Y_test.shape))
    conv_layers = {}

    clf = CNN()
    clf.fit(X_train, Y_train, X_test, Y_test)
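
This example relies on a convert_to_one_hot helper that is not shown; a minimal NumPy sketch consistent with the (1, m) label shape used above (an assumption, not the original helper):

import numpy as np

def convert_to_one_hot(Y, C):
    # Y: integer labels of shape (1, m) -> one-hot matrix of shape (C, m);
    # the caller transposes the result to (m, C)
    return np.eye(C)[Y.reshape(-1)].T
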
Example #4
def train():
    # load data
    X, Y = load_X_and_Y()
    x_train, x_dev, x_test = X
    y_train, y_dev, y_test = Y

    # train model
    model = CNN()
    model.fit(x_train, y_train, x_dev, y_dev, save=True)
    model.evaluate(x_test, y_test)
Example #5
def gridSearch(xTrain, yTrain, xDev, yDev, options):
    paramCombos = myProduct(options)
    bestCombo, bestCrossEntropy = None, float('inf')
    scores = {}
    
    for combo in paramCombos:
        cnn = CNN(numFilters=combo['numFilters'], windowSize=combo['windowSize'])
        cnn.fit(xTrain[:combo['numTrain']], yTrain[:combo['numTrain']], numEpochs=combo['numEpochs'],
                batchSize=combo['batchSize'], verbose=True)
        crossEntropy, accuracy = cnn.evaluate(xDev, yDev, showAccuracy=True)
        scores[tuple(combo.items())] = (crossEntropy, accuracy)
        if crossEntropy < bestCrossEntropy:
            bestCombo, bestCrossEntropy = combo, crossEntropy
        print('Combo: {}, CE: {}, accuracy: {}'.format(combo, crossEntropy, accuracy))
    return scores
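
gridSearch assumes a myProduct helper that expands a dict of option lists into one dict per hyperparameter combination; a plausible sketch built on itertools.product (the real helper is not shown in the source):

import itertools

def myProduct(options):
    # options: e.g. {'numFilters': [32, 64], 'windowSize': [3, 5], ...}
    keys = list(options)
    for values in itertools.product(*(options[k] for k in keys)):
        yield dict(zip(keys, values))
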
Example #6
	def train(self):

		#Change data to required format
		img_rows,img_columns = 45,45
		train_data = self.train_img.reshape((self.train_img.shape[0], img_rows,img_columns))
		train_data = train_data[:, np.newaxis, :, :]
		test_data = self.test_img.reshape((self.test_img.shape[0], img_rows,img_columns))
		test_data = test_data[:, np.newaxis, :, :]
		label_map={}
		count = 0
		for folder in os.listdir("./data"):
			label_map[folder]=count
			count+=1
		for i in range(len(self.train_labels)):
			self.train_labels[i]=label_map[self.train_labels[i]]
		for j in range(len(self.test_labels)):
			self.test_labels[j]=label_map[self.test_labels[j]]

		# One-hot encode the training and testing labels over all total_classes classes found above
		total_classes = count
		train_labels = np_utils.to_categorical(self.train_labels, total_classes)
		test_labels = np_utils.to_categorical(self.test_labels,total_classes)


		# Define and compile the SGD optimizer and CNN model
		print('\n Compiling model...')
		sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
		clf = CNN().build(img_rows,img_columns,1,total_classes)
		clf.compile(loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy"])

		# Initially train and test the model; If weight saved already, load the weights using arguments.
		num_epoch = self.num_epoch		# Number of epochs
		verb = 1			# Verbose
		print('\nTraining the Model...')
		clf.fit(train_data, train_labels, batch_size=self.b_size, nb_epoch=num_epoch,verbose=verb)

		# Evaluate accuracy and loss function of test data
		print('Evaluating Accuracy and Loss Function...')
		loss, accuracy = clf.evaluate(test_data, test_labels, batch_size=self.b_size, verbose=1)
		print('Accuracy of Model: {:.2f}%'.format(accuracy * 100))
		clf.save_weights('model.h5', overwrite=True)
		return accuracy
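
One caveat in the example above: os.listdir() returns entries in an arbitrary, platform-dependent order, so label_map can differ between runs. Sorting the folder names keeps the class indices stable:

import os

label_map = {folder: idx
             for idx, folder in enumerate(sorted(os.listdir("./data")))}
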
Example #7
def train(i, j):

	train_path = TRAIN_DIR + i + "Train.csv"
	test_path = TEST_DIR + j + "Test.csv"

	trainY, trainX, testY, testX, words = termdocumentmatrix(train_path, test_path)
	n = len(trainX[0][0])

	model = CNN(n).get_model()

	# train
	model.fit(trainX, trainY, nb_epoch=15, batch_size=100)

	test_accuracy = get_accuracy(model, testX, testY)
	print(i, j, test_accuracy)
	write_str = str(i) + "\t" + str(j) + "\t" + str(test_accuracy) + "\n"

	with open(output, "a") as f:
		f.write(write_str)
Example #9
    # (reconstructed) standard Keras channels-first/channels-last dispatch;
    # the scraped snippet begins mid-branch
    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print('x_train shape:', x_train.shape, 'x_test.shape:', x_test.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # Convert class vectors to binary class matrices
    y_train = np_utils.to_categorical(y_train, num_classes)
    y_test = np_utils.to_categorical(y_test, num_classes)

    # Build the model
    model = CNN(num_classes, input_shape, ngpus)
    model.fit(x_train,
              y_train,
              batch_size=batch_size * ngpus,
              epochs=nb_epochs,
              verbose=1,
              validation_data=(x_test, y_test))

    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
Example #10
# print img_columns
# print total_classes
clf = CNN().build(img_rows,img_columns,1,total_classes)
clf.compile(loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy"])

# Train and test the model; if weights were already saved, load them via the command-line arguments.
b_size = 128		# Batch size
num_epoch = 20		# Number of epochs
verb = 1			# Verbose

# Train from scratch only when no pre-trained model was requested (args["load_model"] < 0).
if args["load_model"] < 0:
	print('\nTraining the Model...')
	# train_img=np.array([train_img])
	# print train_img.shape
	clf.fit(train_img, train_labels, batch_size=b_size, nb_epoch=num_epoch,verbose=verb)
	
	# Evaluate accuracy and loss function of test data
	print('Evaluating Accuracy and Loss Function...')
	loss, accuracy = clf.evaluate(test_img, test_labels, batch_size=128, verbose=1)
	print('Accuracy of Model: {:.2f}%'.format(accuracy * 100))

	
# Save the pre-trained model.
if args["save_model"] > 0:
	print('Saving weights to file...')
	clf.save_weights(args["weights"], overwrite=True)

	
# Show the images using OpenCV and making random selections.
for num in np.random.choice(np.arange(0, len(test_labels)), size=(5,)):
Example #11
ds_train = tf.data.Dataset.list_files("data/train/*/*.jpg") \
        .map(load_img,num_parallel_calls=tf.data.experimental.AUTOTUNE) \
        .shuffle(buffer_size=1000).batch(BATCH_SIZE) \
        .prefetch(tf.data.experimental.AUTOTUNE)

ds_test = tf.data.Dataset.list_files("data/test/*/*.jpg") \
        .map(load_img,num_parallel_calls=tf.data.experimental.AUTOTUNE) \
        .shuffle(buffer_size=1000).batch(BATCH_SIZE) \
        .prefetch(tf.data.experimental.AUTOTUNE)

# Build the model
model = CNN()
# Compile the model
model.compile(
        optimizer='adam',
        loss='binary_crossentropy',
        metrics=["accuracy"]
    )
# Save a checkpoint once per epoch during training
cp_callback = ModelCheckpoint(
        'logs/ep{epoch:02d}-loss{loss:.2f}.h5',
        monitor='acc',save_weights_only=True, 
    )
# Train the model
history = model.fit(ds_train,epochs=EPOCH,
        validation_data=ds_test,
        callbacks=[cp_callback],
    )
# Save the final model
model.save_weights('logs/last1.h5')
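
The tf.data pipeline above maps a load_img function that is not shown. A sketch of what such a helper might look like, assuming 224x224 RGB inputs and class folders named "0" and "1" (both assumptions, not confirmed by the source):

import tensorflow as tf

def load_img(path):
    # read, decode, resize, and scale one JPEG; the binary label is taken
    # from the parent folder name (assumed here to be "0" or "1")
    img = tf.io.read_file(path)
    img = tf.image.decode_jpeg(img, channels=3)
    img = tf.image.resize(img, (224, 224)) / 255.0
    label = tf.strings.to_number(tf.strings.split(path, '/')[-2])
    return img, label
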
Example #12
    #
    #    np.random.seed(0)
    #    X_test, y_test = datasets.make_moons(200, noise=0.20)

    return X_train, y_train, X_test, y_test


# Hyperparameters
step_size = 5e-3
reg_strength = 5e-3
n_iter = 5
n_layers = 2
neurons_per_layer = 784

## Hyperparameters
#step_size = 5e-3
#reg_strength = 5e-3
#n_iter = 3
#n_layers = 2
#neurons_per_layer = 10

# Network initialization
model = CNN(step_size, reg_strength, n_iter, n_layers, neurons_per_layer)

X_train, y_train, X_test, y_test = inputs()

# Training
model.fit(X_train, y_train)
# Testing
print("Accuracy: %.2f" % (model.test_accuracy(X_test, y_test)))
Example #13
train = pd.read_json(TRAIN_PATH)
train['inc_angle'] = pd.to_numeric(train['inc_angle'], errors='coerce')
train['band_1'] = train['band_1'].apply(lambda x: np.array(x).reshape(75, 75))
train['band_2'] = train['band_2'].apply(lambda x: np.array(x).reshape(75, 75))

batch_size = 10  #64
train_ds = ImageDataset(train[:10], include_target=True, u=0.5)
THREADS = 4
train_loader = DataLoader(train_ds,
                          batch_size,
                          sampler=RandomSampler(train_ds),
                          num_workers=THREADS,
                          pin_memory=use_gpu)

img_size = (75, 75)
img_ch = 2
kernel_size = 7
pool_size = 2
padding = 2
n_out = 1
n_epoch = 35

if __name__ == '__main__':
    cnn = CNN(img_size=img_size,
              img_ch=img_ch,
              kernel_size=kernel_size,
              pool_size=pool_size,
              n_out=n_out,
              padding=padding)
    cnn.fit(train_loader, n_epoch, batch_size, int(len(train_ds)))
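
The snippet passes a use_gpu flag to pin_memory without defining it; a common definition, assuming PyTorch is the framework in use here:

import torch

use_gpu = torch.cuda.is_available()
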
Example #14
from cnn import CNN
model = CNN()
X = [0]
Y = [0]
model.fit(X, Y)
model.predict(X)
Example #15
			print(title)
			test_docs = get_data(test_path, split="test")
			for model in models_by_name:
				
				testX, testY = split_test(test_docs, model)
				trainX, trainY  = split(train_docs, model)
				
				trainX = np.asarray(trainX)
				testX = np.asarray(testX)
				trainY = np.asarray(trainY)
				testY = np.asarray(testY)
				trainX = reshapeX(trainX)
				testX = reshapeX(testX)
				trainY = reshapeY(trainY)
				cnn = CNN(100).get_model()
				cnn.fit(trainX, trainY,  epochs=20, batch_size=250, verbose=0)
				pred = cnn.predict(testX)
				val = []
				for i in pred:
					if i[0] > 0.50:
						val.append(0)
					else:
						val.append(1)
				
				# note: the SVC predictions below overwrite val, discarding the
				# CNN's thresholded output computed above
				linear_model = SVC()
				linear_model.fit(trainX, trainY)
				val = linear_model.predict(testX)
				test_accuracy = precision_recall_fscore_support(testY, val, average="weighted")

				print("Accuracy = {0:.4f}".format(accuracy_score(testY, val)))
				print("Precision = {0:.4f}".format(test_accuracy[0]))
Example #16
def main():

    (X_train, y_train), (X_test,
                         y_test) = imdb.load_data(num_words=MAX_NUM_WORDS)
    X = np.concatenate((X_train, X_test), axis=0)
    y = np.concatenate((y_train, y_test), axis=0)
    labels = to_categorical(y)
    print('Training samples: %i' % len(X))

    # docs   = negative_docs + positive_docs
    # labels = [0 for _ in range(len(negative_docs))] + [1 for _ in range(len(positive_docs))]

    # labels = to_categorical(labels)
    # print('Training samples: %i' % len(docs))

    # tokenizer.fit_on_texts(docs)
    # sequences = tokenizer.texts_to_sequences(docs)

    # word_index = tokenizer.word_index

    result = [len(x) for x in X]
    print('Text informations:')
    print(
        'max length: %i / min length: %i / mean length: %i / limit length: %i'
        % (np.max(result), np.min(result), np.mean(result), MAX_SEQ_LENGTH))
    # print('vacobulary size: %i / limit: %i' % (len(word_index), MAX_NUM_WORDS))

    # Padding all sequences to same length of `MAX_SEQ_LENGTH`
    data = pad_sequences(X, maxlen=MAX_SEQ_LENGTH, padding='post')

    histories = []

    for i in range(RUNS):
        print('Running iteration %i/%i' % (i + 1, RUNS))
        random_state = np.random.randint(1000)

        X_train, X_val, y_train, y_val = train_test_split(
            data, labels, test_size=VAL_SIZE, random_state=random_state)

        emb_layer = None
        # if USE_GLOVE:
        #     emb_layer = create_glove_embeddings()

        model = CNN(embedding_layer=emb_layer,
                    num_words=MAX_NUM_WORDS,
                    embedding_dim=EMBEDDING_DIM,
                    filter_sizes=FILTER_SIZES,
                    feature_maps=FEATURE_MAPS,
                    max_seq_length=MAX_SEQ_LENGTH,
                    dropout_rate=DROPOUT_RATE,
                    hidden_units=HIDDEN_UNITS,
                    nb_classes=NB_CLASSES).build_model()

        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.Adam(),
                      metrics=['accuracy'])

        if i == 0:
            print(model.summary())
            plot_model(model,
                       to_file='cnn_model.png',
                       show_layer_names=False,
                       show_shapes=True)

        history = model.fit(
            X_train,
            y_train,
            epochs=NB_EPOCHS,
            batch_size=BATCH_SIZE,
            verbose=1,
            validation_data=(X_val, y_val),
            callbacks=[
                # TQDMCallback(),
                ModelCheckpoint('model-%i.h5' % (i + 1),
                                monitor='val_loss',
                                verbose=1,
                                save_best_only=True,
                                mode='min'),
                # TensorBoard(log_dir='./logs/temp', write_graph=True)
            ])
        print()
        histories.append(history.history)

    with open('history.pkl', 'wb') as f:
        pickle.dump(histories, f)
    with open('history.pkl', 'rb') as f:
        histories = pickle.load(f)

    def get_avg(histories, his_key):
        tmp = []
        for history in histories:
            tmp.append(history[his_key][np.argmin(history['val_loss'])])
        return np.mean(tmp)

    print('Training: \t%0.4f loss / %0.4f acc' %
          (get_avg(histories, 'loss'), get_avg(histories, 'acc')))
    print('Validation: \t%0.4f loss / %0.4f acc' %
          (get_avg(histories, 'val_loss'), get_avg(histories, 'val_acc')))

    plot_acc_loss('training', histories, 'acc', 'loss')
    plot_acc_loss('validation', histories, 'val_acc', 'val_loss')
Example #17
def main():
    """
    Load train/validation data set and train the model
    """
    parser = argparse.ArgumentParser(
        description='Behavioral Cloning Training Program')
    parser.add_argument('-d',
                        help='data directory',
                        dest='data_dir',
                        type=str,
                        default='training/track1')
    parser.add_argument('-s',
                        help='save directory',
                        dest='save_dir',
                        type=str,
                        default='saved_models/track1')
    parser.add_argument('-t',
                        help='test size fraction',
                        dest='test_size',
                        type=float,
                        default=0.2)
    parser.add_argument('-p',
                        help='drop out probability',
                        dest='keep_prob',
                        type=float,
                        default=0.5)
    parser.add_argument('-n',
                        help='number of epochs',
                        dest='epochs',
                        type=int,
                        default=10)
    parser.add_argument('-m',
                        help='samples per epoch',
                        dest='samples_per_epoch',
                        type=int,
                        default=20000)
    parser.add_argument('-b',
                        help='batch size',
                        dest='batch_size',
                        type=int,
                        default=40)
    parser.add_argument('-v',
                        help='number of validation batches',
                        dest='num_validation_batches',
                        type=int,
                        default=18)
    parser.add_argument('-l',
                        help='learning rate',
                        dest='learning_rate',
                        type=float,
                        default=1.0e-4)
    args = parser.parse_args()

    print('-' * 30)
    print('Parameters')
    print('-' * 30)
    for key, value in vars(args).items():
        print('{:<20} := {}'.format(key, value))
    print('-' * 30)

    if not os.path.exists(args.data_dir):
        raise Exception("Data directory does not exist.")
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    else:
        shutil.rmtree(args.save_dir)
        os.makedirs(args.save_dir)

    X_train, X_valid, Y_train, Y_valid = load_data(args)
    conv_layer_sizes = [(24, 5, 2), (36, 5, 2), (48, 5, 2), (64, 3, 1),
                        (64, 3, 1)]
    dense_layer_sizes = [100, 50, 10, 1]
    model = CNN(conv_layer_sizes, dense_layer_sizes)
    print("=" * 30)
    model.summary()
    print("=" * 30)

    start = time.time()
    model.fit(X_train,
              X_valid,
              Y_train,
              Y_valid,
              args.data_dir,
              save_dir=args.save_dir,
              learning_rate=args.learning_rate,
              epochs=args.epochs,
              samples_per_epoch=args.samples_per_epoch,
              batch_size=args.batch_size,
              p=args.keep_prob,
              num_validation_batches=args.num_validation_batches)
    end = time.time()
    total = math.ceil(end - start)
    print("")
    print("FINISHED.")
    print("Training took {} minutes {} seconds.".format(
        total // 60, total % 60))
Example #18
# Define and compile the SGD optimizer and CNN model
print('\n Compiling model...')
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
clf = CNN(width=img_rows, height=img_columns, depth=1, total_classes=total_classes, weightsPath=args["weights"])
clf.compile(loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy"])

# Train and test the model; if weights were already saved, load them via the command-line arguments.
b_size = 128		# Batch size
num_epoch = 20		# Number of epochs
verb = 1			# Verbose

# Train from scratch only when no pre-trained model was requested (args["load_model"] < 0).
if args["load_model"] < 0:
	print('\nTraining the Model...')
	clf.fit(trainData, trainLabels, batch_size=b_size, nb_epoch=num_epoch,verbose=verb)
	
	# Evaluate accuracy and loss function of test data
	print('Evaluating Accuracy and Loss Function...')
	loss, accuracy = clf.evaluate(test_img, test_labels, batch_size=128, verbose=1)
	print('Accuracy of Model: {:.2f}%'.format(accuracy * 100))

	
# Save the pre-trained model.
if args["save_model"] > 0:
	print('Saving weights to file...')
	clf.save_weights(args["weights"], overwrite=True)

	
# Show the images using OpenCV and making random selections.
for num in np.random.choice(np.arange(0, len(test_labels)), size=(5,)):
Example #19
test-data has been converted to test_data
test-label has been converted to test_label
"""
if args["dataset"] == "Fashion-MNIST":
    if not args["train_data"] is None:
        tr_data, tr_label = load_mnist(args["train_data"])
        tr_data = tr_data.reshape(tr_data.shape[0], 28, 28, 1)
        net_4 = CNN(input_dim=(28, 28, 1),
                    filter_size=hidden_nodes,
                    output_dim=10,
                    activation=["relu"] * len(hidden_nodes),
                    filters=[32] * len(hidden_nodes),
                    embedding_dim=32,
                    dropout=0.2)
        net_4.model.summary()
        net_4.fit(tr_data, tr_label)
        test_data, test_label = load_mnist(args["test_data"])
        test_data = test_data.reshape(test_data.shape[0], 28, 28, 1)
        pred_lab = net_4.predict(test_data)
        print_res(test_label, pred_lab)
    else:
        test_data, test_label = load_mnist(args["test_data"])
        test_data = test_data.reshape(test_data.shape[0], 28, 28, 1)
        net4 = tf.keras.models.load_model("mnist_best.h5")

        pred_lab = np.argmax(net4.predict(test_data), axis=1)
        print_res(test_label, pred_lab)
elif args["dataset"] in ["Cifar-10", "Cifar 10"]:
    if not args["train_data"] is None:
        images, label = get_batch_data_cifar(1)
        for i in range(2, 6):