Example No. 1
        def do_init_db(db):
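            # Load this node's bookkeeping row (ids, file store, port), make sure
            # the root inode exists and has a sane mode, verify that the database
            # schema version matches DBVERSION, and return the loaded root inode.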
            try:
                self.node_id, self.root_id, self.root_inum, self.store, self.port = yield db.DoFn(
                    "select node.id,root.id,root.inode,node.files,node.port from node,root where root.id=node.root and node.name=${name}",
                    name=node,
                )
            except NoData:
                raise RuntimeError("data for '%s' is missing" % (node,))

            nnodes, = yield db.DoFn(
                "select count(*) from node where root=${root} and id != ${node}", root=self.root_id, node=self.node_id
            )
            self.single_node = not nnodes

            try:
                mode, = yield db.DoFn("select mode from inode where id=${inode}", inode=self.root_inum)
            except NoData:
                raise RuntimeError("database has not been initialized: inode %d is missing" % (self.root_inum,))
            if mode == 0:
                yield db.Do(
                    "update inode set mode=${dir} where id=${inode}",
                    dir=stat.S_IFDIR | stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO,
                    inode=self.root_inum,
                )

            self.info = Info()
            yield self.info._load(db)
            if self.info.version != DBVERSION:
                raise RuntimeError("Need database version %s, got %s" % (DBVERSION, self.info.version))

            root = SqlInode(self, self.root_inum)
            yield root._load(db)
            returnValue(root)
Example No. 2
		def do_work2(db):
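			# Fan the work list out to a bounded pool of fetch() workers:
			# 'd' entries remove the backing file in a thread, 'f' entries are
			# queued for copying (once per inode), 't' entries trim the cache.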
			queue = DeferredQueue()
			defs = []
			nworkers = len(entries)//5+1
			if nworkers > self.nworkers:
				nworkers = self.nworkers

			for i in range(nworkers):
				d = self.fetch(db,queue)
				d.addErrback(log.err,"fetch()")
				defs.append(d)

			workers = set()
			for id,inum,typ in entries:
				if not self.running:
					break
				trace('copyrun',"%d: %s",inum,typ)

				self.last_entry = id

				if typ == 'd':
					def dt(inum):
						path = build_path(self.fs.store,inum, create=False)
						try:
							os.unlink(path)
						except EnvironmentError as e:
							if e.errno != errno.ENOENT:
								raise
					yield deferToThread(dt,inum)
				else:
					inode = SqlInode(self.fs,inum)
					yield inode._load(db)
					if typ == 'f':
						if inum in workers:
							trace('copyrun',"%d: in workers (%s)",inum,typ)
							continue
						workers.add(inum)
						queue.put((id,inode))
					elif typ == 't':
						if inode.cache:
							yield inode.cache.trim(inode.size)
					else:
						raise RuntimeError("Unknown type '%s' (inode %d)" % (typ,inum))
					continue

			for i in range(nworkers):
				queue.put(None)
			yield DeferredList(defs)
Example No. 3
    def remote_readfile(self, caller, inum, reader, missing):
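        # Stream the requested byte ranges to the caller as far as they are
        # cached locally, in MAX_BLOCK-sized reads, and return whatever is
        # still missing afterwards.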
        trace("readfile", "%d: %s %s", inum, caller, reader)
        inode = SqlInode(self.fs, inum)
        yield self.fs.db(inode._load, DB_RETRIES)
        if not inode.inum:
            trace("readfile", "%d: Inode probably deleted", inum)
            raise DataMissing(missing)

        trace("readfile", "%d: avail %s & %s, known %s", inum, inode.cache.available, missing, inode.cache.known)
        avail = inode.cache.available & missing
        if avail:
            missing -= avail
            trace("readfile", "%d: send %s", inum, avail)
            h = yield inode.open(os.O_RDONLY)

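            # Split each available extent into chunks of at most MAX_BLOCK bytes
            # so that no single read gets too large.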
            def split(av):
                for a, b, c in av:
                    while b > MAX_BLOCK:
                        yield a, MAX_BLOCK
                        a += MAX_BLOCK
                        b -= MAX_BLOCK
                    yield a, b

            for a, b in split(avail):
                try:
                    data = yield h.read(a, b, atime=False)
                except Exception as e:
                    print_exc()
                    break
                try:
                    d = reader.callRemote("data", a, data)
                    trace("readfile", "%d: remote_data: %s", inum, repr(d))
                    yield d
                    # yield reader.callRemote("data",a,data)
                except Exception as e:
                    print_exc()
                    break
            h.release()
        trace("readfile", "%d: Missing %s for %s / %s", inum, missing, caller, reader)
        returnValue(missing)
Example No. 4
		def do_work(db):
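			# Walk the event log from this node's last-seen event up to `seq`
			# and turn each entry into cache / todo-table updates; at most 100
			# events are handled per run, and self.restart is set when that
			# batch was nearly full.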
			seq = self.seq
			if seq is None:
				seq, = yield db.DoFn("select event.id from event where event.typ='s' and event.node in (select id from node where root=${root}) order by event.id desc limit 1", root=self.fs.root_id)
			last,do_copy = yield db.DoFn("select event,autocopy from node where id=${node}", node=self.fs.node_id)
			if seq == last:
				self.seq = None
				if self.do_readonly:
					self.do_readonly = False
					self.fs.readonly = False
					self.fs.copier.trigger()
				return

			if self.do_readonly and self.fs.readonly:
				upd = ""
				trace('eventcollect',"Events from %d to %d (initial sync)",last+1,seq)
			else:
				upd = " and node != ${node}"
				trace('eventcollect',"Events from %d to %d",last+1,seq)

			it = yield db.DoSelect("select event.id,event.inode,event.node,event.typ,event.range from event join inode on inode.id=event.inode where inode.typ='f' and event.id>${min} and event.id<=${max} and event.node in (select id from node where root=${root}"+upd+") order by event.id limit 100", root=self.fs.root_id, node=self.fs.node_id, min=last,max=seq, _empty=True)
			last = seq
			n=0
			for event,inum,node,typ,r in it:
				n+=1
				last = event
				if r:
					r = Range(r)
				trace('eventcollect',"%d: ev=%d node=%d typ=%s range=%s",inum,event,node,typ, "?" if r is None else str(r))
				if typ in ('i','s','f'):
					continue

				inode = SqlInode(self.fs,inum)
				if inode.inum is None:
					# locally deleted.
					# Temporarily create a fake cache record so that it'll be skipped when we continue.
					trace('eventcollect',"%s: deleted (ev=%d)", inode,event)
					yield db.Do("replace into cache(inode,node,event) values(${inode},${node},${event})", inode=inum,node=self.fs.node_id,event=seq)
					continue
				yield inode._load(db)

				if typ == 'c':
					if node != self.fs.node_id:
						r = r.replace(self.fs.node_id,node)
					inode.cache.known.add_range(r,replace=True)

				elif typ == 't':
					yield db.Do("replace into todo(node,inode,typ) values(${node},${inode},'t')", inode=inode.inum, node=self.fs.node_id, _empty=True)
					continue

				elif typ == 'd':
					yield db.Do("replace into todo(node,inode,typ) values(${node},${inode},'d')", inode=inode.inum, node=self.fs.node_id, _empty=True)
					yield db.Do("delete from fail where node=${node} and inode=${inode}", inode=inode.inum, node=self.fs.node_id, _empty=True)
					continue

				elif typ == 'n':
					yield inode.cache.trim(0, do_file=False)
					if not do_copy:
						yield db.Do("replace into todo(node,inode,typ) values(${node},${inode},'t')", inode=inode.inum, node=self.fs.node_id, _empty=True)
					
				else:
					raise ValueError("event record %d: unknown type %s" % (event,repr(typ)))

				self.fs.changer.note(inode.cache,event)
				#self.fs.changer.now(inode.cache,event,db)

				# TODO only do this if we don't just cache
				if do_copy:
					trace('eventcollect',"TODO: inode %d",inode.inum)
					yield db.Do("replace into todo(node,inode,typ) values(${node},${inode},'f')", inode=inode.inum, node=self.fs.node_id, _empty=True)
					if not self.fs.readonly:
						db.call_committed(self.fs.copier.trigger)
				# yield inode.cache._close()

			yield db.Do("update node set event=${event} where id=${node}", node=self.fs.node_id, event=last, _empty=True)
			if n > 90:
				self.restart = 2