######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from typing import Tuple, Union
from .big5freq import (
BIG5_CHAR_TO_FREQ_ORDER,
BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO,
)
from .euckrfreq import (
EUCKR_CHAR_TO_FREQ_ORDER,
EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO,
)
from .euctwfreq import (
EUCTW_CHAR_TO_FREQ_ORDER,
EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO,
)
from .gb2312freq import (
GB2312_CHAR_TO_FREQ_ORDER,
GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO,
)
from .jisfreq import (
JIS_CHAR_TO_FREQ_ORDER,
JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO,
)
from .johabfreq import JOHAB_TO_EUCKR_ORDER_TABLE
class CharDistributionAnalysis:
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3
def __init__(self) -> None:
        # Mapping table to get frequency order from char order (as returned
        # by get_order())
self._char_to_freq_order: Tuple[int, ...] = tuple()
self._table_size = 0 # Size of above table
# This is a constant value which varies from language to language,
# used in calculating confidence. See
# http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
# for further detail.
self.typical_distribution_ratio = 0.0
self._done = False
self._total_chars = 0
self._freq_chars = 0
self.reset()
def reset(self) -> None:
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._done = False
self._total_chars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._freq_chars = 0
def feed(self, char: Union[bytes, bytearray], char_len: int) -> None:
"""feed a character with known length"""
if char_len == 2:
            # we only care about 2-byte characters in our distribution analysis
order = self.get_order(char)
else:
order = -1
if order >= 0:
self._total_chars += 1
# order is valid
if order < self._table_size:
if 512 > self._char_to_freq_order[order]:
self._freq_chars += 1
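    # Illustrative note (added for clarity; not in the original source): a
    # multi-byte prober calls feed() with each complete character it extracts,
    # e.g. feed(b"\xb0\xa1", 2) for a two-byte EUC-KR character; calls with
    # char_len != 2 are ignored because only two-byte characters carry useful
    # frequency information here.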
def get_confidence(self) -> float:
"""return confidence based on existing data"""
        # if we did not receive enough characters in our consideration range,
        # return a negative answer
if self._total_chars <= 0 or self._freq_chars <= self.MINIMUM_DATA_THRESHOLD:
return self.SURE_NO
if self._total_chars != self._freq_chars:
r = self._freq_chars / (
(self._total_chars - self._freq_chars) * self.typical_distribution_ratio
)
if r < self.SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return self.SURE_YES
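    # Illustrative arithmetic (added for clarity; the numbers are hypothetical):
    # with 100 analysed characters of which 80 are "frequent" and a typical
    # distribution ratio of 0.75, r = 80 / ((100 - 80) * 0.75) = 5.33, which is
    # clamped to SURE_YES (0.99).  With only 30 frequent characters,
    # r = 30 / ((100 - 30) * 0.75) = 0.57 and is returned as-is.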
def got_enough_data(self) -> bool:
        # It is not necessary to receive all data to draw a conclusion.
        # For charset detection, a certain amount of data is enough.
return self._total_chars > self.ENOUGH_DATA_THRESHOLD
def get_order(self, _: Union[bytes, bytearray]) -> int:
# We do not handle characters based on the original encoding string,
# but convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency
# table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self) -> None:
super().__init__()
self._char_to_freq_order = EUCTW_CHAR_TO_FREQ_ORDER
self._table_size = EUCTW_TABLE_SIZE
self.typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = byte_str[0]
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + byte_str[1] - 0xA1
return -1
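    # Illustrative example (added for clarity; not in the original source):
    # the EUC-TW byte pair 0xC5 0xA2 maps to order
    # 94 * (0xC5 - 0xC4) + (0xA2 - 0xA1) = 95, i.e. cell 1 of the second
    # 94-character row.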
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self) -> None:
super().__init__()
self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
self._table_size = EUCKR_TABLE_SIZE
self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = byte_str[0]
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + byte_str[1] - 0xA1
return -1
class JOHABDistributionAnalysis(CharDistributionAnalysis):
def __init__(self) -> None:
super().__init__()
self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
self._table_size = EUCKR_TABLE_SIZE
self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
first_char = byte_str[0]
if 0x88 <= first_char < 0xD4:
code = first_char * 256 + byte_str[1]
return JOHAB_TO_EUCKR_ORDER_TABLE.get(code, -1)
return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self) -> None:
super().__init__()
self._char_to_freq_order = GB2312_CHAR_TO_FREQ_ORDER
self._table_size = GB2312_TABLE_SIZE
self.typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = byte_str[0], byte_str[1]
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self) -> None:
super().__init__()
self._char_to_freq_order = BIG5_CHAR_TO_FREQ_ORDER
self._table_size = BIG5_TABLE_SIZE
self.typical_distribution_ratio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = byte_str[0], byte_str[1]
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
return 157 * (first_char - 0xA4) + second_char - 0x40
return -1
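    # Illustrative example (added for clarity; not in the original source):
    # each Big5 lead byte spans 157 cells: trail bytes 0x40-0x7E fill cells
    # 0-62 and trail bytes 0xA1-0xFE fill cells 63-156.  The pair 0xA5 0x40
    # therefore maps to 157 * (0xA5 - 0xA4) + (0x40 - 0x40) = 157.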
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self) -> None:
super().__init__()
self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
self._table_size = JIS_TABLE_SIZE
self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
        # second byte range: 0x40 -- 0x7e , 0x81 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = byte_str[0], byte_str[1]
if 0x81 <= first_char <= 0x9F:
order = 188 * (first_char - 0x81)
elif 0xE0 <= first_char <= 0xEF:
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
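    # Illustrative example (added for clarity; not in the original source):
    # lead bytes 0x81-0x9F give rows 0-30 and lead bytes 0xE0-0xEF give rows
    # 31-46, each row 188 cells wide, so the pair 0xE0 0x40 maps to
    # 188 * 31 + (0x40 - 0x40) = 5828.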
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self) -> None:
super().__init__()
self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
self._table_size = JIS_TABLE_SIZE
self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = byte_str[0]
if char >= 0xA0:
return 94 * (char - 0xA1) + byte_str[1] - 0xA1
return -1
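

# Illustrative usage sketch (added for clarity; not part of the original
# chardet module).  In chardet these analysers are driven by the multi-byte
# probers, which feed them one complete multi-byte character at a time.  The
# block below exercises EUCKRDistributionAnalysis directly on a small EUC-KR
# sample; the sample text and the byte-pairing loop are assumptions made for
# this example only.
if __name__ == "__main__":
    analyser = EUCKRDistributionAnalysis()
    sample = "안녕하세요 한국어 예시 문장입니다".encode("euc-kr")
    i = 0
    while i < len(sample):
        if sample[i] >= 0x80 and i + 1 < len(sample):
            # Two-byte EUC-KR character: hand both bytes to the analyser.
            analyser.feed(sample[i : i + 2], 2)
            i += 2
        else:
            # ASCII byte (e.g. the space): not relevant to the analysis.
            i += 1
    print("confidence:", analyser.get_confidence())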