src/h/e/hellanzb-HEAD/Hellanzb/NZBQueue.py
    def fixNZBFileName(nzb):
        if os.path.normpath(os.path.dirname(nzb.destDir)) == os.path.normpath(Hellanzb.POSTPONED_DIR):
            nzb.destDir = Hellanzb.WORKING_DIR
 
    nzbfilename = nzb.nzbFileName
    d = os.path.join(Hellanzb.POSTPONED_DIR, archiveName(nzbfilename))
    if os.path.isdir(d):
        try:
            # An empty WORKING_DIR can simply be removed
            os.rmdir(Hellanzb.WORKING_DIR)
        except OSError:
            # WORKING_DIR still has files in it: stash them under TEMP_DIR instead
            files = os.listdir(Hellanzb.WORKING_DIR)
            if len(files):
                name = files[0]
                ext = getFileExtension(name)
                if ext != None:
                    name = name.replace(ext, '')
                move(Hellanzb.WORKING_DIR, os.path.join(Hellanzb.TEMP_DIR, name))
 
            else:
                debug('ERROR Stray WORKING_DIR!: ' + str(os.listdir(Hellanzb.WORKING_DIR)))
                name = os.path.join(Hellanzb.TEMP_DIR, 'stray_WORKING_DIR')
                hellaRename(name)
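
This excerpt appears to tidy a leftover WORKING_DIR out of the way before a postponed archive is brought back: an empty directory is simply removed, otherwise its contents are parked under TEMP_DIR. A minimal standalone sketch of the same idea, using plain os/shutil instead of hellanzb's move/hellaRename/getFileExtension helpers (the function name and parameters are illustrative, not part of hellanzb):

import os
import shutil

def stash_working_dir(working_dir, temp_dir):
    """Remove working_dir if it is empty; otherwise park it under temp_dir."""
    try:
        # rmdir only succeeds on an empty directory
        os.rmdir(working_dir)
    except OSError:
        files = os.listdir(working_dir)
        if files:
            # Name the stash after the first leftover file, minus its extension
            name = os.path.splitext(files[0])[0]
        else:
            name = 'stray_WORKING_DIR'
        # Collisions with an existing stash are not handled in this sketch
        shutil.move(working_dir, os.path.join(temp_dir, name))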

src/h/e/hellanzb-HEAD/Hellanzb/NZBLeecher/NZBModel.py
 
        ## Where the nzb files will be downloaded
        self.destDir = Hellanzb.WORKING_DIR
 
        ## A cancelled NZB is marked for death. ArticleDecoder will dispose of any
 
            # Move the postponed files to the new postponed dir
            for file in os.listdir(Hellanzb.WORKING_DIR):
                move(os.path.join(Hellanzb.WORKING_DIR, file), os.path.join(postponed, file))
        finally:
 
    # Cache all WORKING_DIR segment filenames in a map of lists
    for file in os.listdir(Hellanzb.WORKING_DIR):
        if not validWorkingFile(os.path.join(Hellanzb.WORKING_DIR, file),
                                overwriteZeroByteSegments):
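
The last fragment ("Cache all WORKING_DIR segment filenames in a map of lists") walks WORKING_DIR once so later lookups do not have to touch the filesystem per segment. A rough sketch of such a cache, assuming a '<basename>.segmentNNNN' on-disk naming scheme (an assumption for illustration, not necessarily hellanzb's actual scheme):

import os
from collections import defaultdict

def cache_segment_files(working_dir):
    """Build a map of lists: base file name -> segment files found on disk.

    The '<basename>.segmentNNNN' parsing below is illustrative only.
    """
    segments = defaultdict(list)
    for entry in sorted(os.listdir(working_dir)):
        base, dot, suffix = entry.rpartition('.')
        if dot and suffix.startswith('segment'):
            segments[base].append(entry)
    return segments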

src/h/e/hellanzb-HEAD/Hellanzb/Daemon.py
    hellaRename(processingDir)
 
    move(Hellanzb.WORKING_DIR, processingDir)
    nzb.destDir = processingDir
    nzb.archiveDir = processingDir
    nzb.nzbFileName = nzbFileName
 
    os.mkdir(Hellanzb.WORKING_DIR)
 
    # The list of skipped pars is maintained in the state XML as only the subjects of the
    try:
        hellaRename(os.path.join(Hellanzb.TEMP_DIR, 'canceled_WORKING_DIR'))
        move(Hellanzb.WORKING_DIR, os.path.join(Hellanzb.TEMP_DIR, 'canceled_WORKING_DIR'))
        os.mkdir(Hellanzb.WORKING_DIR)
        rmtree(os.path.join(Hellanzb.TEMP_DIR, 'canceled_WORKING_DIR'))
            Hellanzb.downloadScannerID.cancel()
 
        nzb.destDir = Hellanzb.WORKING_DIR
        parseNZB(nzb, 'Downloading recovery pars')
    else:
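
The Daemon.py lines show two recurring moves: hand the finished WORKING_DIR over to a processing directory, and, on cancel, park it under TEMP_DIR, recreate it empty, and only then delete the parked copy. A sketch of the cancel pattern with plain shutil standing in for hellanzb's hellaRename/move/rmtree helpers (the function name and signature are made up for illustration):

import os
import shutil

def cancel_working_dir(working_dir, temp_dir):
    """Park WORKING_DIR under TEMP_DIR, recreate it empty, then delete the parked copy."""
    parked = os.path.join(temp_dir, 'canceled_WORKING_DIR')
    if os.path.exists(parked):
        # A previous cancel may have left this behind; clear it first
        shutil.rmtree(parked)
    shutil.move(working_dir, parked)
    # A fresh WORKING_DIR is available immediately for the next download
    os.mkdir(working_dir)
    # The slow deletion of partial files happens on the parked copy
    shutil.rmtree(parked)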

src/h/e/hellanzb-HEAD/Hellanzb/NZBLeecher/NZBParser.py
        self.nzbContentPriority = NZBSegmentQueue.NZB_CONTENT_P
 
        files = os.listdir(Hellanzb.WORKING_DIR)
        files.sort()
        for file in files:
 
            # Anonymous duplicate file segments lying around are too painful to keep track
            # of. As are segments that previously failed on different servers
            if DUPE_SEGMENT_RE.match(file) or FAILED_ALT_SERVER_SEGMENT_RE.match(file):
                os.remove(os.path.join(Hellanzb.WORKING_DIR, file))
                continue
 
            if not validWorkingFile(os.path.join(Hellanzb.WORKING_DIR, file),
                                    self.nzb.overwriteZeroByteFiles):
                continue
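
NZBParser.py prunes WORKING_DIR before resuming: segment files matching DUPE_SEGMENT_RE or FAILED_ALT_SERVER_SEGMENT_RE are deleted outright rather than tracked. A compact sketch of that clean-up loop; the two regular expressions below are stand-ins, not hellanzb's real patterns:

import os
import re

# Stand-in patterns: hellanzb defines its own DUPE_SEGMENT_RE and
# FAILED_ALT_SERVER_SEGMENT_RE with different, real expressions.
DUPE_SEGMENT_RE = re.compile(r'.+\.segment\d+_dupe\d+$')
FAILED_ALT_SERVER_SEGMENT_RE = re.compile(r'.+\.segment\d+-failed$')

def prune_working_dir(working_dir):
    """Delete segment files that should never be resumed from."""
    for entry in sorted(os.listdir(working_dir)):
        if DUPE_SEGMENT_RE.match(entry) or FAILED_ALT_SERVER_SEGMENT_RE.match(entry):
            os.remove(os.path.join(working_dir, entry))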

src/h/e/hellanzb-HEAD/Hellanzb/NZBLeecher/DupeHandler.py
 
                    # Set our filename now, since we know it, for sanity sake
                    dupeFilename = nextDupeName(os.path.join(Hellanzb.WORKING_DIR, file),
                                                checkOnDisk = False,
                                                minIteration = dupeEntry[0] + 1)
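
The DupeHandler.py call picks a non-colliding name for a duplicate file up front, optionally without checking the disk and starting from a minimum iteration. A simplified sketch of what such a helper can look like; hellanzb defines the real nextDupeName elsewhere in the project, and the suffix format here is only an assumption:

import os

def next_dupe_name(path, check_on_disk=True, min_iteration=0):
    """Return the next free duplicate-style name for path.

    Simplified stand-in for hellanzb's nextDupeName; the '_hellanzb_dupeN'
    suffix is assumed for illustration.
    """
    i = min_iteration
    while True:
        candidate = '%s_hellanzb_dupe%d' % (path, i)
        if not check_on_disk or not os.path.exists(candidate):
            return candidate
        i += 1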