######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import logging
import re
from typing import Optional, Union
from .enums import LanguageFilter, ProbingState
INTERNATIONAL_WORDS_PATTERN = re.compile(
b"[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?"
)
class CharSetProber:
SHORTCUT_THRESHOLD = 0.95
def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
self._state = ProbingState.DETECTING
self.active = True
self.lang_filter = lang_filter
self.logger = logging.getLogger(__name__)
def reset(self) -> None:
self._state = ProbingState.DETECTING
@property
def charset_name(self) -> Optional[str]:
return None
@property
def language(self) -> Optional[str]:
raise NotImplementedError
def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
raise NotImplementedError
@property
def state(self) -> ProbingState:
return self._state
def get_confidence(self) -> float:
return 0.0
@staticmethod
def filter_high_byte_only(buf: Union[bytes, bytearray]) -> bytes:
buf = re.sub(b"([\x00-\x7F])+", b" ", buf)
return buf
@staticmethod
def filter_international_words(buf: Union[bytes, bytearray]) -> bytearray:
"""
We define three types of bytes:
alphabet: english alphabets [a-zA-Z]
international: international characters [\x80-\xFF]
marker: everything else [^a-zA-Z\x80-\xFF]
The input buffer can be thought to contain a series of words delimited
by markers. This function works to filter all words that contain at
least one international character. All contiguous sequences of markers
are replaced by a single space ascii character.
This filter applies to all scripts which do not use English characters.
"""
filtered = bytearray()
# This regex expression filters out only words that have at-least one
# international character. The word may include one marker character at
# the end.
words = INTERNATIONAL_WORDS_PATTERN.findall(buf)
for word in words:
filtered.extend(word[:-1])
# If the last character in the word is a marker, replace it with a
# space as markers shouldn't affect our analysis (they are used
# similarly across all languages and may thus have similar
# frequencies).
last_char = word[-1:]
if not last_char.isalpha() and last_char < b"\x80":
last_char = b" "
filtered.extend(last_char)
return filtered
@staticmethod
def remove_xml_tags(buf: Union[bytes, bytearray]) -> bytes:
"""
Returns a copy of ``buf`` that retains only the sequences of English
alphabet and high byte characters that are not between <> characters.
This filter can be applied to all scripts which contain both English
characters and extended ASCII characters, but is currently only used by
``Latin1Prober``.
"""
filtered = bytearray()
in_tag = False
prev = 0
buf = memoryview(buf).cast("c")
for curr, buf_char in enumerate(buf):
# Check if we're coming out of or entering an XML tag
# https://github.com/python/typeshed/issues/8182
if buf_char == b">": # type: ignore[comparison-overlap]
prev = curr + 1
in_tag = False
# https://github.com/python/typeshed/issues/8182
elif buf_char == b"<": # type: ignore[comparison-overlap]
if curr > prev and not in_tag:
# Keep everything after last non-extended-ASCII,
# non-alphabetic character
filtered.extend(buf[prev:curr])
# Output a space to delimit stretch we kept
filtered.extend(b" ")
in_tag = True
# If we're not in a tag...
if not in_tag:
# Keep everything after last non-extended-ASCII, non-alphabetic
# character
filtered.extend(buf[prev:])
return filtered