如何在Python中将文件转换为utf-8



我需要在Python中将一堆文件转换为utf-8,而我在"转换文件"部分遇到了问题。

我想做类似的事情:

iconv -t utf-8 $file > converted/$file # this is shell code

谢谢!

您可以使用编解码器模块,如下所示:

import codecs

# Copy the file in fixed-size chunks so arbitrarily large files can be
# re-encoded without loading them fully into memory.
BLOCKSIZE = 1048576 # or some other, desired size in bytes
with codecs.open(sourceFileName, "r", "your-source-encoding") as sourceFile:
    with codecs.open(targetFileName, "w", "utf-8") as targetFile:
        # read() returns "" at EOF, which ends the sentinel iterator.
        for chunk in iter(lambda: sourceFile.read(BLOCKSIZE), ""):
            targetFile.write(chunk)

EDIT:添加了BLOCKSIZE参数来控制文件块大小。

这在一个小测试中对我有效:

sourceEncoding = "iso-8859-1"
targetEncoding = "utf-8"
# Decode with the source encoding and re-encode as UTF-8.
# Fixed for Python 3: the original used the Python 2-only `unicode`
# builtin and never closed either file; `with` guarantees both are
# closed even if the conversion raises.
with open("source", encoding=sourceEncoding) as source:
    with open("target", "w", encoding=targetEncoding) as target:
        target.write(source.read())

感谢您的回复,它很有效!

由于源文件是混合格式的,我添加了一个要按顺序尝试的源格式列表(sourceFormats),在UnicodeDecodeError上我尝试下一种格式:

from __future__ import with_statement
import os
import sys
import codecs
from chardet.universaldetector import UniversalDetector
# Encoding every converted file is written in.
targetFormat = 'utf-8'
# Converted copies are written under this relative directory (see writeConversion).
outputDir = 'converted'
# Single shared chardet detector; get_encoding_type() resets it per file.
detector = UniversalDetector()
def get_encoding_type(current_file):
    """Guess the encoding of *current_file* with the shared chardet detector.

    Feeds the file line by line and stops as soon as the detector is
    confident. Returns the encoding name (may be None when chardet
    cannot decide).
    """
    detector.reset()
    # Open in binary mode: chardet's feed() expects raw bytes, and the
    # Python 2 `file()` builtin used originally no longer exists.
    # `with` also guarantees the file is closed (the original leaked it).
    with open(current_file, 'rb') as f:
        for line in f:
            detector.feed(line)
            if detector.done:
                break
    detector.close()
    return detector.result['encoding']
def convertFileBestGuess(filename):
    """Convert *filename* to UTF-8 by trying a fixed list of source encodings.

    Each candidate encoding is tried in order; the first one that decodes
    the whole file without a UnicodeDecodeError wins. Returns silently
    (without converting) when every candidate fails.
    """
    sourceFormats = ['ascii', 'iso-8859-1']
    for encoding in sourceFormats:
        try:
            # Fixed: the original referenced an undefined `fileName`
            # (NameError) and had inconsistent try/except indentation.
            with codecs.open(filename, 'rU', encoding) as sourceFile:
                writeConversion(sourceFile)
                print('Done.')
                return
        except UnicodeDecodeError:
            pass
def convertFileWithDetection(fileName):
    """Convert *fileName* to UTF-8, detecting its source encoding via chardet."""
    print("Converting '" + fileName + "'...")
    detected = get_encoding_type(fileName)
    converted = False
    try:
        with codecs.open(fileName, 'rU', detected) as sourceFile:
            writeConversion(sourceFile)
            converted = True
    except UnicodeDecodeError:
        # Detection was wrong for this file; fall through to the error report.
        converted = False
    if converted:
        print('Done.')
        return
    print("Error: failed to convert '" + fileName + "'.")

def writeConversion(file):
    """Write the decoded contents of *file* (an open source stream) into
    outputDir under the same base name, encoded as targetFormat.
    """
    # Fixed: the original referenced an undefined global `fileName`
    # (NameError at call time). Derive the target name from the open
    # file object instead; codecs streams expose the underlying .name.
    targetName = os.path.join(outputDir, os.path.basename(file.name))
    with codecs.open(targetName, 'w', targetFormat) as targetFile:
        for line in file:
            targetFile.write(line)
# Off topic: get the file list and call convertFile on each file
# ...

(由Rudro Badhon编辑:这包含了最初的做法——依次尝试多种格式直到不再抛出异常,以及使用 chardet.universaldetector 的替代方法)

未知源编码类型的答案

基于@Sébastien RoccaSerra

Python 3.6

import os    
from chardet import detect
# get file encoding type
def get_encoding_type(file):
    """Return chardet's best guess for the encoding of the file at path *file*."""
    # Detection needs the raw, undecoded bytes, so read in binary mode.
    with open(file, 'rb') as handle:
        raw = handle.read()
    guess = detect(raw)
    return guess['encoding']
from_codec = get_encoding_type(srcfile)
# NOTE(review): assumes `srcfile` and `trgfile` are paths defined by the
# caller. `from_codec` may be None when chardet cannot decide, in which
# case open() silently falls back to the locale's default encoding —
# TODO confirm that is acceptable.
# add try: except block for reliability
try: 
    with open(srcfile, 'r', encoding=from_codec) as f, open(trgfile, 'w', encoding='utf-8') as e:
        text = f.read() # for small files; for big files read in chunks
        e.write(text)
    os.remove(srcfile) # remove the file in the old encoding
    os.rename(trgfile, srcfile) # move the UTF-8 copy onto the original name
except UnicodeDecodeError:
    print('Decode Error')
except UnicodeEncodeError:
    print('Encode Error')

您可以使用这个一行命令(假设您想从 utf16 转换为 utf8):

    python -c "from pathlib import Path; path = Path('yourfile.txt') ; path.write_text(path.read_text(encoding='utf16'), encoding='utf8')"

其中 yourfile.txt 对应 shell 代码中 $file 所指的路径。

要实现这一点,您需要 Python 3.4 或更新版本(如今大多数环境都已满足)。

以下是以上代码的可读性更强的版本

from pathlib import Path

# Decode the file as UTF-16, then write it back in place encoded as UTF-8.
# Note: read_text() loads the whole file into memory.
path = Path("yourfile.txt")
text = path.read_text(encoding="utf16")
path.write_text(text, encoding="utf8")

这是一个Python3函数,用于将任何文本文件转换为UTF-8编码的文件。(不使用不必要的包装)

def correctSubtitleEncoding(filename, newFilename, encoding_from, encoding_to='UTF-8'):
    """Re-encode a text file, normalizing every line ending to CRLF.

    Reads *filename* using *encoding_from* and writes *newFilename* using
    *encoding_to* (UTF-8 by default) with Windows-style '\\r\\n' endings,
    as subtitle formats commonly expect.
    """
    with open(filename, 'r', encoding=encoding_from) as fr:
        # newline='' stops Python from translating our explicit '\r\n'
        # (the default translation would emit '\r\r\n' on Windows).
        with open(newFilename, 'w', encoding=encoding_to, newline='') as fw:
            for line in fr:
                # Fixed: the original wrote line[:-1] + 'rn' — the backslashes
                # of '\r\n' were lost in the paste, so it appended a literal
                # "rn", and the blind [:-1] slice chopped the final character
                # of a last line that had no trailing newline.
                fw.write(line.rstrip('\r\n') + '\r\n')

您可以在循环中轻松使用它来转换文件列表。

要猜测源文件的编码是什么,可以使用 *nix 系统的 file 命令。

示例:

$ file --mime jumper.xml
jumper.xml: application/xml; charset=utf-8

将目录中的所有文件转换为utf-8编码。它是递归的,可以通过后缀过滤文件。感谢@Sole Sensei

# pip install -i https://pypi.tuna.tsinghua.edu.cn/simple chardet
import os
import re
from chardet import detect

def get_file_list(d):
    """Recursively collect source/text files under directory *d*.

    Skips 'venv' and 'cmake-build-debug' directories entirely and returns
    only files whose extension is .c, .cpp, .h or .txt.
    """
    result = []
    for root, dirs, files in os.walk(d):
        # Prune in place so os.walk never descends into the skipped dirs.
        dirs[:] = [sub for sub in dirs if sub not in ['venv', 'cmake-build-debug']]
        for filename in files:
            # your filter — fixed: the original r'(.c|.cpp|.h|.txt)$' left the
            # dots unescaped, so '.c$' matched ANY character before a final
            # 'c' (e.g. the file 'misc' was wrongly included).
            if re.search(r'\.(c|cpp|h|txt)$', filename):
                result.append(os.path.join(root, filename))
    return result

# get file encoding type
# get file encoding type
def get_encoding_type(file):
    """Return chardet's guess for the encoding of the file at path *file*."""
    with open(file, 'rb') as f:
        # chardet needs the raw bytes, not decoded text.
        return detect(f.read())['encoding']

if __name__ == "__main__":
    # Convert every matching file under the current directory to UTF-8,
    # in place, via a temporary '.swp' sibling file.
    file_list = get_file_list('.')
    for src_file in file_list:
        print(src_file)
        trg_file = src_file + '.swp'
        # NOTE(review): from_codec may be None when chardet cannot decide;
        # open() then falls back to the locale default encoding — confirm
        # this is acceptable. On failure the '.swp' file is left behind.
        from_codec = get_encoding_type(src_file)
        try:
            with open(src_file, 'r', encoding=from_codec) as f, open(trg_file, 'w', encoding='utf-8') as e:
                text = f.read()
                e.write(text)
            # Replace the original only after the UTF-8 copy was fully written.
            os.remove(src_file)
            os.rename(trg_file, src_file)
        except UnicodeDecodeError:
            print('Decode Error')
        except UnicodeEncodeError:
            print('Encode Error')

这是我的暴力方法。它还能处理输入中混合的行尾(\r\n 和 \n)。

    # NOTE(review): incomplete fragment — the opening `try:` this block
    # belongs to was lost in the paste, and the escape sequences were
    # mangled: b'rn' was almost certainly b'\r\n', b'nr' was b'\n\r', and
    # the bare 'n' strings were '\n'. As written it compares against the
    # literal two bytes 'rn' and appends a literal 'n'.
    # open the CSV file
    inputfile = open(filelocation, 'rb')
    # NOTE(review): inputfile is never closed.
    outputfile = open(outputfilelocation, 'w', encoding='utf-8')
    for line in inputfile:
        # Strip whatever line ending the input used, decode permissively
        # (undecodable bytes become U+FFFD via 'replace'), write one ending.
        if line[-2:] == b'rn' or line[-2:] == b'nr':
            output = line[:-2].decode('utf-8', 'replace') + 'n'
        elif line[-1:] == b'r' or line[-1:] == b'n':
            output = line[:-1].decode('utf-8', 'replace') + 'n'
        else:
            output = line.decode('utf-8', 'replace') + 'n'
        outputfile.write(output)
    outputfile.close()
except BaseException as error:
    # NOTE(review): `cfg`, `self.outf` and `self.loadedwitherrors` are defined
    # outside this fragment — presumably a method of a larger loader class.
    cfg.log(self.outf, "Error(18): opening CSV-file " + filelocation + " failed: " + str(error))
    self.loadedwitherrors = 1
    return ([])
try:
    # open the CSV-file of this source table
    csvreader = csv.reader(open(outputfilelocation, "rU"), delimiter=delimitervalue, quoting=quotevalue, dialect=csv.excel_tab)
except BaseException as error:
    cfg.log(self.outf, "Error(19): reading CSV-file " + filelocation + " failed: " + str(error))
import codecs
import glob
import chardet
ALL_FILES = glob.glob('*.txt')
def kira_encoding_function():
    """Re-encode every file in ALL_FILES to UTF-8 in place.

    Files chardet already reports as UTF-8 or ASCII are left untouched.
    Detection is not 100% accurate:
    https://stackoverflow.com/a/436299/5951529
    See:
    https://chardet.readthedocs.io/en/latest/usage.html#example-using-the-detect-function
    https://stackoverflow.com/a/37531241/5951529
    """
    for filename in ALL_FILES:
        with open(filename, 'rb') as opened_file:
            bytes_file = opened_file.read()
            chardet_data = chardet.detect(bytes_file)
            fileencoding = (chardet_data['encoding'])
            print('fileencoding', fileencoding)
            if fileencoding in ['utf-8', 'ascii']:
                print(filename + ' in UTF-8 encoding')
            else:
                # Convert file to UTF-8:
                # https://stackoverflow.com/q/19932116/5951529
                # Fixed: the original detected the encoding and then ignored
                # it, hard-decoding everything as cp1251 — silently mangling
                # any non-Cyrillic input. Use the detected encoding and keep
                # cp1251 only as the fallback when chardet returns None.
                decoded_text = bytes_file.decode(fileencoding or 'cp1251')
                with codecs.open(filename, 'w', 'utf-8') as converted_file:
                    converted_file.write(decoded_text)
                print(filename +
                      ' in ' +
                      fileencoding +
                      ' encoding automatically converted to UTF-8')

kira_encoding_function()

来源:

相关内容

  • 没有找到相关文章

最新更新