Code example #1
File: full_cifar.py Project: yamizi/FeatureNet
import json

# ProductSet and run_tensorflow are defined elsewhere in the FeatureNet project.

def main(target,
         min_index=0,
         max_index=0,
         filter_indices=None,
         datasets=None,
         epochs=12,
         depth=1,
         data_augmentation=False):
    baseurl = "./"
    productSet = ProductSet(baseurl + target + ".pdt")

    # Use None as the default to avoid sharing a mutable list between calls.
    if filter_indices is None:
        filter_indices = []
    if not datasets:
        datasets = ["mnist"]

    for index, product in enumerate(productSet.format_products()):
        print("product {0}".format(index))

        if index >= min_index and (len(filter_indices) == 0
                                   or index in filter_indices):
            # Persist the formatted product as JSON before training on it.
            with open("{0}products/{1}_{2}.json".format(baseurl, target,
                                                        index), "w") as f:
                f.write(json.dumps(product))

            run_tensorflow(product,
                           target,
                           index,
                           datasets,
                           epochs,
                           depth,
                           data_augmentation=data_augmentation)

        if max_index != 0 and index == max_index:
            break
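A minimal usage sketch, assuming main is importable from full_cifar.py; the target name "NAS" and the product indices are hypothetical:

from full_cifar import main

# Hypothetical: train only products 3 and 7 of ./NAS.pdt on MNIST.
main("NAS",
     filter_indices=[3, 7],
     datasets=["mnist"],
     epochs=12)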
Code example #2
File: parser.py Project: yamizi/FeatureNet
def loadProducts():
    url = "C:/Users/salah.ghamizi/Downloads/pledge-master/tensorflow//NAS Products.pdt"
    productSet = ProductSet(url)
    print("number of features {0}, number of products: {1}".format(
        productSet.nbFeatures, productSet.nbProducts))

    # Inspect a single product; a random index such as
    # random.randrange(0, productSet.nbProducts - 1) would work as well.
    rand_product = productSet.format_product(2)
    print(rand_product)

    # Report any suspiciously short products.
    products = productSet.format_products()
    for i, prd in enumerate(products):
        if len(prd) < 10:
            print("{0}:{1}".format(i, len(prd)))
Code example #3
# ProductSet, KerasFeatureVector and TensorflowGenerator come from the
# FeatureNet project; json is the standard-library module.

def train_model_from_json(pledge_output_file,
                          products_file,
                          index=None,
                          export_file="",
                          training_epochs=5,
                          batch_size=64,
                          dataset="mnist"):

    initial_product_set = ProductSet(pledge_output_file, binary_products=True)

    # Load the feature vectors exported by an earlier run.
    with open(products_file, 'r') as f1:
        vects = json.loads(f1.read())
    products = [KerasFeatureVector.from_vector(vect) for vect in vects]
    vectors_export = []

    # index selects either a single product (scalar) or a (start, end) slice.
    if index is not None:
        if isinstance(index, (list, tuple)):
            start = max(0, int(index[0]))
            end = min(len(products), int(index[1]))
            products = products[start:end]
        else:
            products = [products[int(index)]]

    for i, keras_product in enumerate(products):
        initial_product_set.binary_products = True
        product, original_product = initial_product_set.format_product(
            original_product=keras_product.features)

        tensorflow = TensorflowGenerator(
            product,
            training_epochs,
            dataset,
            product_features=original_product,
            features_label=initial_product_set.features,
            batch_size=batch_size)
        print("original accuracy {} new accuracy {}".format(
            keras_product.accuracy, tensorflow.model.accuracy))
        vector_pdt = tensorflow.model.to_kerasvector()
        vectors_export.append(vector_pdt.to_vector())

    if export_file:
        with open(export_file, 'w') as f1:
            f1.write(json.dumps(vectors_export))
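A usage sketch under the assumption that the .pdt file and a JSON export from an earlier run are available; both file names and the index range are hypothetical:

# Hypothetical: retrain products 0..9 from an earlier export and
# write the refreshed vectors to a new file.
train_model_from_json("NAS.pdt",
                      "products_export.json",
                      index=(0, 10),
                      export_file="products_retrained.json",
                      training_epochs=5,
                      dataset="mnist")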
Code example #4
def train_model_from_product(pledge_output_file,
                             index,
                             export_file="",
                             training_epochs=5,
                             batch_size=64,
                             dataset="mnist"):
    initial_product_set = ProductSet(pledge_output_file, binary_products=True)

    # Train once on the product at the requested index.
    product, original_product = initial_product_set.format_product(
        prd_index=index)
    tensorflow = TensorflowGenerator(
        product,
        training_epochs,
        dataset,
        product_features=original_product,
        features_label=initial_product_set.features,
        batch_size=batch_size)
    vector_pdt = tensorflow.model.to_kerasvector()

    # Re-parse the trained model's feature vector and retrain on the result.
    parsed_product = initial_product_set.format_product(
        original_product=vector_pdt.features)

    if not parsed_product:
        return None
    product, original_product = parsed_product
    tensorflow = TensorflowGenerator(
        product,
        training_epochs,
        dataset,
        product_features=original_product,
        features_label=initial_product_set.features,
        batch_size=batch_size)

    # Guard against the empty default path before exporting.
    if export_file:
        with open(export_file, 'w') as f1:
            f1.write(json.dumps([vector_pdt.to_vector()]))
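Correspondingly, a single product can be trained by index; the file names below are hypothetical:

# Hypothetical: train the product at index 2 and export its vector.
train_model_from_product("NAS.pdt",
                         2,
                         export_file="product_2.json",
                         training_epochs=5,
                         dataset="mnist")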
Code example #5
File: evolution.py Project: yamizi/FeatureNet
def run(inputfile=None, outputfile=None):
    # baseurl, base_products, nb_base_products, training_epochs, dataset,
    # evolution_epochs, evolve and train are module-level names in evolution.py.

    initial_product_set = ProductSet(baseurl + base_products + ".pdt")
    initial_products_vectors = []
    last_evolution_epoch = 0

    if inputfile:
        # Resume from the last non-empty line of a previous run's checkpoint
        # file (the original loop read past the end and kept an empty string).
        last_line = ""
        with open(inputfile + ".json", 'r') as f:
            for line in f:
                if line.strip():
                    last_line = line

        # Each checkpoint line is "<epoch> <json>"; split only on the first
        # space, because the JSON payload itself contains spaces.
        last_evol = last_line.split(" ", 1)
        last_evolution_epoch = int(last_evol[0])
        initial_products_vectors = json.loads(last_evol[1])
        initial_products_vectors = [
            KerasFeatureVector.from_vector(i) for i in initial_products_vectors
        ]

    else:
        # No checkpoint: train the initial population from scratch.
        inputfile = baseurl + base_products + "_initial"
        for index, (product, original_product) in enumerate(
                initial_product_set.format_products()):
            tensorflow = TensorflowGenerator(
                product,
                training_epochs,
                dataset,
                product_features=original_product,
                features_label=initial_product_set.features)
            initial_products_vectors.append(tensorflow.model.to_vector())

            if nb_base_products > 0 and index == nb_base_products:
                break

    if nb_base_products:
        last_population = initial_products_vectors[:nb_base_products]
    else:
        last_population = initial_products_vectors

    with open(inputfile + ".json", 'a') as f1:
        f1.write("\n{} {}".format(
            last_evolution_epoch,
            json.dumps([v.to_vector() for v in last_population])))

    last_evolution_epoch = last_evolution_epoch + 1
    for i in range(evolution_epochs):
        print("### evolution epoch {}".format(i + last_evolution_epoch))
        new_pop = evolve(last_population)
        print("evolved population {}, parent fitness {}".format(
            len(new_pop), [pop.fitness for pop in new_pop]))

        # Train every offspring that has not been evaluated yet.
        # (A multiprocessing variant was sketched here but is disabled;
        # training runs sequentially.)
        for e in new_pop:
            if not e.accuracy:
                prod, original_product = initial_product_set.format_product(
                    original_product=e.features)
                train(prod, training_epochs, dataset, e, original_product,
                      initial_product_set.features)

        # Checkpoint the new generation.
        with open(inputfile + ".json", 'a') as f1:
            f1.write("\n{} {}".format(last_evolution_epoch + i,
                                      json.dumps([v.to_vector()
                                                  for v in new_pop])))
        last_population = new_pop

    if not outputfile:
        outputfile = "{0}report_evol_{1}epochs_{2}evolution_{3}.txt".format(
            baseurl, dataset, training_epochs, evolution_epochs)

    with open(outputfile, "a") as f2:
        f2.write("\r\n".join(str(x) for x in last_population))
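Since each generation is checkpointed as a "<epoch> <json>" line, a run can be resumed by passing the stem of the checkpoint file; the stems below are hypothetical:

# Hypothetical: fresh run, then a resumed run picking up from the
# last checkpoint line written to <base_products>_initial.json.
run(outputfile="report.txt")
run(inputfile="./NAS_initial", outputfile="report.txt")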