Example No. 1
def main():
    allProducts = []

    fns = [
        getAllTate,
        getAllICAOne,
        # getAllICATwo,
        getAllDesignMuseum,
        getAllNationalGallery,
        getAllRA,
        getAllManchester,
        getAllNottingham,
        getAllSouthbank,
        getAllSLG,
        getAllCamdenArts,
    ]

    # Run each scraper, passing print as its logger, and de-duplicate results
    for fn in fns:
        products = fn(print)
        for p in products:
            if not p.isIn(allProducts):
                allProducts.append(p)

    print(f"Done! Scraped {len(allProducts)} products!")
    toJSON(allProducts, "products.json")
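The toJSON helper called here is not shown in this example. A minimal sketch of a compatible two-argument version, assuming each product exposes its fields via __dict__ (only the name and signature come from the call above; the body is an assumption):

import json

def toJSON(products, filename):
    # Assumed helper: dump each product's attribute dict to a JSON file.
    with open(filename, "w") as f:
        json.dump([p.__dict__ for p in products], f, indent=2)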
Example No. 2
    def sum(self, request, *args, **kwargs):
        # Aggregate the requested field over the filtered queryset.
        sum_field = request.query_params['field']
        result = self.filter_queryset(self.get_queryset()).aggregate(
            total=Sum(sum_field))
        return HttpResponse(utils.toJSON(result))
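Note that aggregate() returns a plain dict, so the result can be serialized directly. Assuming the viewset is registered under a router, a request might look like this (URL, field name, and values are hypothetical):

GET /api/products/sum/?field=price
{"total": 1999.98}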
Example No. 3
    def distinct(self, request, *args, **kwargs):
        # flat=True yields the values themselves, so no tuple unpacking is needed.
        distinct_field = request.query_params['field']
        queryset = self.filter_queryset(
            self.get_queryset()).values_list(distinct_field, flat=True).distinct()
        return HttpResponse(utils.toJSON(list(queryset)))
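The utils.toJSON shared by these view methods evidently returns a JSON string suitable for HttpResponse. Its implementation is not shown; a minimal sketch, assuming it wraps json.dumps with Django's encoder so dates and decimals serialize cleanly:

import json
from django.core.serializers.json import DjangoJSONEncoder

def toJSON(obj):
    # Assumed helper: serialize to a JSON string, handling dates/decimals.
    return json.dumps(obj, cls=DjangoJSONEncoder)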
Example No. 4
    @staticmethod
    def save_results(results, file_name, file_type):
        """
        This function saves Elasticsearch results to a file.
        The supported types are: csv, json

        :param results: elasticsearch results
        :param file_name: file name to store
        :param file_type: file type to store

        Example:
            >>> builder = QueryBuilder()
            >>> builder.save_results(results, "results", "csv")
        """
        prepared_data = prepare_for_save(results)  # modify actors field
        file = "{}.{}".format(file_name, file_type)

        if file_type == "json":
            jsonified_data = toJSON(prepared_data)  # jsonify data from ELK
            save_json_to_file(jsonified_data, file)

        elif file_type == "csv":
            save_attr_dict_to_csv(prepared_data, file)  # save data as CSV
        else:
            print("File type '{}' is not supported".format(file_type))
Example No. 5
    def field_info(self, request, *args, **kwargs):
        # Map each model field name to Django's internal type name.
        fields = self.get_serializer_class().Meta.model._meta.get_fields()
        result = [{f.name: f.get_internal_type()} for f in fields]
        return HttpResponse(utils.toJSON(result))
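For a typical model this returns a list of one-entry dicts, along the lines of (illustrative field names; the type names are what get_internal_type() actually produces):

[{"id": "AutoField"}, {"name": "CharField"}, {"created": "DateTimeField"}]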
Example No. 6
    def xslOptions(self, request, *args, **kwargs):
        # Expose the view's XSL configuration as JSON.
        return HttpResponse(utils.toJSON(self.xsl_config))
Example No. 7
    def send(self, obj):
        # Serialize the object and write it as one protocol line.
        payload = utils.toJSON(obj)
        if config.debug:
            print('sending to <%s>: %s' % (self.nickname, payload))
        self.sendLine(payload)
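sendLine is characteristic of a Twisted LineReceiver-style protocol, where each message is one line on the wire; this is presumably why toJSON must produce a compact, single-line JSON string here.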
Example No. 8
def buildAndTrain(yamlConfig,
                  input_shape,
                  train_data,
                  val_data,
                  steps_per_epoch,
                  eval_steps_per_epoch,
                  outdir,
                  prune=True):

    # Get the full model
    model = getModel(yamlConfig['KerasModel'], yamlConfig, input_shape)
    model._name = "full"

    # Get pruned models
    if prune:
        # Prune dense layers only
        pruning_schedule = tfmot.sparsity.keras.PolynomialDecay(
            initial_sparsity=0.0,
            final_sparsity=0.5,
            begin_step=2000,
            end_step=4000)
        model_for_layerwise_pruning = getModel("float_cnn_densePrune",
                                               yamlConfig, input_shape)
        model_for_layerwise_pruning._name = "layerwise_pruning"

        # Prune the full model
        model_for_full_pruning = tfmot.sparsity.keras.prune_low_magnitude(
            model, pruning_schedule=pruning_schedule)
        model_for_full_pruning._name = "full_pruning"

        models = [model, model_for_layerwise_pruning, model_for_full_pruning]
    else:
        models = [model]

    histories, scores = list(), list()
    for model in models:
        print("Training model: {} ".format(model.name))
        model.summary()
        callbacks = getCallbacks()
        if "pruning" in model.name:
            print("Model sparsity for {}:".format(model.name))
            print_model_sparsity(model)
            callbacks = [
                pruning_callbacks.UpdatePruningStep(),
                pruning_callbacks.PruningSummaries(log_dir=outdir +
                                                   '/logs_%s/' % model.name,
                                                   profile_batch=0)
            ]
        print("Start training loop:\n\n")
        toJSON(model, outdir + '/model_%s.json' % model.name)
        model.compile(loss=LOSS, optimizer=OPTIMIZER, metrics=["accuracy"])

        history = model.fit(train_data,
                            epochs=epochs,  # `epochs` is assumed to be a module-level constant, like LOSS and OPTIMIZER
                            validation_data=val_data,
                            steps_per_epoch=steps_per_epoch,
                            validation_steps=eval_steps_per_epoch,
                            callbacks=callbacks,
                            verbose=1)

        val_score = model.evaluate(val_data)
        print('\n Validation loss:', val_score[0])
        print('\n Validation accuracy:', val_score[1])
        histories.append(history)
        scores.append(val_score)
    return histories, scores
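In this example toJSON takes a Keras model and an output path rather than arbitrary data; a minimal sketch, assuming it simply persists the architecture via model.to_json() (the body is an assumption):

def toJSON(model, outfile_name):
    # Assumed helper: write the Keras model architecture as JSON.
    with open(outfile_name, "w") as f:
        f.write(model.to_json())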