def create_new_feed(feed, source):
    """Create a new Feed and its detail record from a parsed feed entry."""
    try:
        with transaction.atomic():
            slug = feed.get("id") + feed.get("title")
            new_feed = Feed.objects.create(
                feed_id=feed.get("id"),
                title=feed.get("title"),
                summary=feed.get("summary", ""),
                author=feed.get("author", ""),
                slug=slugify(slug[0:254]),
                link=get_link_from_feed(feed),
                links=get_links_from_feed(feed),
                source=source,
            )
            # Keep the original entry JSON alongside the normalised Feed row.
            FeedDetail.objects.create(
                feed=new_feed,
                content_json=json.dumps(feed),
            )
            source.last_active_on = datetime.now()
            source.save()
    except ValidationError as exc:
        logger(__name__, "Could not create new Feed due to {}".format(str(exc)))
        raise ValidationError(str(exc))
    logger(__name__, "Successfully created new feed")

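# Illustrative sketch (not part of the original module): one way create_new_feed
# could be driven from an already-parsed feed, e.g. a feedparser result. The helper
# name and the assumption that entries behave like dicts are hypothetical; the real
# calling code may differ.
def create_feeds_from_parsed(parsed, source):
    """Create a Feed for every entry of a parsed feed (hypothetical helper)."""
    for entry in parsed.entries:
        try:
            # feedparser entries support .get(), matching what create_new_feed reads.
            create_new_feed(dict(entry), source)
        except ValidationError:
            # create_new_feed already logged the failure; skip the bad entry.
            continue
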
def load_data(self):
    """Load data and labels for every subject listed in the dataframe."""
    # Pre-allocate one slot per subject; each slot is replaced below.
    self.data = [[]] * self.num_subjects
    self.labels = [[]] * self.num_subjects
    for subject_idx in range(self.num_subjects):
        # Log progress roughly every 100 subjects.
        if subject_idx % 100 == 1:
            logger('Loaded {} out of {} subjects'.format(subject_idx, self.num_subjects))
        file_id = self.df.FileID.iloc[subject_idx].lower()
        self.data[subject_idx], self.labels[subject_idx] = self.__load_h5data(file_id)

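# Illustrative sketch (assumption): __load_h5data is not shown here, so this is a
# minimal h5py-based reader under the assumption that each subject is stored as
# "<file_id>.h5" with "data" and "labels" datasets. The dataset names, file layout
# and the config.data_loader.data_dir attribute are hypothetical.
def _load_h5data_sketch(self, file_id):
    import os
    import h5py

    path = os.path.join(self.config.data_loader.data_dir, file_id + '.h5')
    with h5py.File(path, 'r') as f:
        # [()] reads each dataset fully into memory as a NumPy array.
        return f['data'][()], f['labels'][()]
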
def get_feed(request):
    """Return a feed with its details and original JSON."""
    try:
        feed = Feed.objects.get(slug=request.slug)
        details = FeedDetail.objects.get(feed=feed.id)
        comments = Comment.objects.filter(feed=feed.id)
    except ObjectDoesNotExist as exc:
        logger(__name__, "Could not get feed due to {}".format(str(exc)))
        return feeds_pb2.Feed()
    return get_feed_details(feed, details.content_json, comments)

def update_feed_source(request):
    """Toggle the active status of a Feed Source."""
    try:
        feed_source = FeedSource.objects.get(id=request.id)
        feed_source.status = not feed_source.status
        feed_source.save()
    except (ValidationError, FeedSource.DoesNotExist) as exc:
        logger(__name__, "Could not update Feed Source due to {}".format(str(exc)))
        errors = _get_errors(exc)
        return feeds_pb2.OperationStatus(
            op_status=feeds_pb2.Status.Value('FAILURE'),
            details={'errors': feeds_pb2.RepeatedString(data=errors)},
        )
    return feeds_pb2.OperationStatus(
        op_status=feeds_pb2.Status.Value('SUCCESS'),
    )

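# Illustrative sketch (assumption): _get_errors is referenced above but not shown.
# Django's ValidationError exposes a .messages list; other exceptions are reduced to
# their string form, which is what RepeatedString needs. The real helper may differ.
def _get_errors_sketch(exc):
    """Flatten an exception into a list of message strings (hypothetical helper)."""
    if isinstance(exc, ValidationError):
        return list(exc.messages)
    return [str(exc)]
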
def create_bookmark_for_feed(request):
    """Create a new bookmark for a feed."""
    try:
        feed = Feed.objects.get(id=request.feed.id)
        Bookmarked.objects.create(
            user=request.user.username,
            feed=feed,
        )
    except (ValidationError, Feed.DoesNotExist) as exc:
        logger(__name__, "Could not add Bookmark due to {}".format(str(exc)))
        errors = _get_errors(exc)
        return feeds_pb2.OperationStatus(
            op_status=feeds_pb2.Status.Value('FAILURE'),
            details={'errors': feeds_pb2.RepeatedString(data=errors)},
        )
    return feeds_pb2.OperationStatus(
        op_status=feeds_pb2.Status.Value('SUCCESS'),
    )

def create_new_feed_source(link):
    """Validate the link and create a new Feed Source."""
    try:
        response = parse_new_feeds(link)
        if response["status"]:
            # Prefer an explicit logo, fall back to the feed image, else leave empty.
            if "logo" in response["details"]:
                logo_link = response["details"]["logo"]
            elif "image" in response["details"]:
                logo_link = response["details"]["image"]["href"]
            else:
                logo_link = ''
            FeedSource.objects.create(
                name=response["details"]["title"],
                link=link,
                logo_link=logo_link,
                details=json.dumps(response["details"]),
            )
        else:
            return feeds_pb2.OperationStatus(
                op_status=feeds_pb2.Status.Value('FAILURE'),
                details={
                    'errors': feeds_pb2.RepeatedString(
                        data=['Could not parse given link'])
                },
            )
    except ValidationError as exc:
        logger(__name__, "Could not add Feed Source due to {}".format(str(exc)))
        errors = _get_errors(exc)
        return feeds_pb2.OperationStatus(
            op_status=feeds_pb2.Status.Value('FAILURE'),
            details={'errors': feeds_pb2.RepeatedString(data=errors)},
        )
    return feeds_pb2.OperationStatus(
        op_status=feeds_pb2.Status.Value('SUCCESS'),
    )

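# Illustrative sketch (assumption): parse_new_feeds is referenced above but not shown.
# Based on how its result is used (a "status" flag plus a "details" dict carrying
# "title"/"logo"/"image"), a feedparser-based version could look like this; the real
# implementation may differ.
def parse_new_feeds_sketch(link):
    import feedparser

    parsed = feedparser.parse(link)
    # bozo is set when the document could not be parsed as a feed.
    if parsed.bozo or not parsed.feed.get('title'):
        return {"status": False, "details": {}}
    return {"status": True, "details": dict(parsed.feed)}
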
def main():
    ##########################################################
    # TensorFlow configuration
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    k.tensorflow_backend.set_session(tf.Session(config=tf_config))

    ##########################################################
    # Capture the config path from the run arguments,
    # then process the JSON configuration file.
    try:
        args = get_args()
        config = process_config(args.config)

        # Create the experiment directories.
        create_dirs([
            config.callbacks.tensorboard_log_dir,
            config.callbacks.checkpoint_dir
        ])

        logger('Creating data generators ...')
        data_loader = {
            'train': factory.create("data_loader." + config.data_loader.name)(
                config, subset='train', shuffle=True),
            'eval': factory.create("data_loader." + config.data_loader.name)(
                config, subset='eval')
        }

        logger('Creating the model ...')
        model = factory.create("models." + config.model.name)(config)

        logger('Creating the trainer ...')
        if config.model.num_gpus > 1:
            trainer = factory.create("trainers." + config.trainer.name)(
                model.parallel_model, data_loader, config)
        else:
            trainer = factory.create("trainers." + config.trainer.name)(
                model.model, data_loader, config)

        logger('Starting model training ...')
        trainer.train()
        logger('Training has finished!')
    except Exception as e:
        logger(e)
        sys.exit(1)
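
# Illustrative sketch (assumption): factory.create is used above to resolve classes
# such as "models.<Name>" from dotted strings at runtime. An importlib-based version
# consistent with that usage could look like this; the real factory may differ.
import importlib


def create_sketch(cls_path):
    """Return the class named by a dotted "module.ClassName" path (hypothetical)."""
    module_name, cls_name = cls_path.rsplit('.', 1)
    module = importlib.import_module(module_name)
    return getattr(module, cls_name)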