September 19, 2025, 12:07am 1
Hi
I got pissed by some social media filters content based on keywords (remember when “X” blocked mentions of Mastodon?) or how some companies parse someone’s texts to train AI…
This tool will make life for bots harder.
Yes, I am author. And yes, I allow spreading/using/modifying etc, I am not greedy.
I tried to make it harder to abuse script by blocking some common patterns, but since code is open it is still trivial to bypass.
Here is my tool:
Tool
import re
import sys
import random
CHAR_POOLS = {
"upper": [
"А","Β","Ϲ","Ԁ","Ε","Ғ","Ԍ","Н","Ι","Ј","Κ","Ꮮ","М","Ν","О","Р","Ⴓ","ᖇ","Ѕ","Т","Ս","Ѵ","Ѡ","Χ","Υ","Ζ",
"Ⱥ","Ȼ","Ȩ","Ȟ","ȴ","Ⱥ","Ḿ","Ṅ","Ṍ","Ṗ","Ɋ","Ŕ","Ṡ","ᚦ","Ŧ","Ṳ","Ṽ","Ẃ","Ẍ","Ẏ","Ẑ"
],
"lower": [
"а","Ь","с","ԁ","е","ƒ","ɡ","һ","...
September 19, 2025, 12:07am 1
Hi
I got annoyed by how some social media platforms filter content based on keywords (remember when “X” blocked mentions of Mastodon?) and by how some companies parse people’s texts to train AI…
This tool will make life for bots harder.
Yes, I am the author. And yes, I allow spreading, using, modifying, etc. — I am not greedy.
I tried to make the script harder to abuse by blocking some common patterns, but since the code is open, it is still trivial to bypass.
Here is my tool:
Tool
import random
import re
import sys
import unicodedata
# Homoglyph pools used to disguise ASCII text.
#   upper/lower: look-alike letters.  obfuscate_normal indexes these by
#     letter position (a=0 .. z=25), so only the first 26 entries of each
#     list are ever selected by that mapping -- the second row is currently
#     unreachable.  NOTE(review): confirm whether the extra entries were
#     meant to widen a random choice instead.
#   combining: zero-width combining marks stacked onto letters by
#     apply_combining / obfuscate_normal.
#   symbols: currency/star glyphs -- not referenced anywhere in the
#     visible code.  NOTE(review): possibly dead data.
CHAR_POOLS = {
    "upper": [
        "А","Β","Ϲ","Ԁ","Ε","Ғ","Ԍ","Н","Ι","Ј","Κ","Ꮮ","М","Ν","О","Р","Ⴓ","ᖇ","Ѕ","Т","Ս","Ѵ","Ѡ","Χ","Υ","Ζ",
        "Ⱥ","Ȼ","Ȩ","Ȟ","ȴ","Ⱥ","Ḿ","Ṅ","Ṍ","Ṗ","Ɋ","Ŕ","Ṡ","ᚦ","Ŧ","Ṳ","Ṽ","Ẃ","Ẍ","Ẏ","Ẑ"
    ],
    "lower": [
        "а","Ь","с","ԁ","е","ƒ","ɡ","һ","і","ј","κ","ⅼ","м","п","о","р","զ","г","ѕ","т","ս","ν","ш","х","у","ᴢ",
        "ą","ả","ȼ","ḓ","ė","ḟ","ġ","ḧ","ỉ","ĵ","ķ","ḽ","ṃ","ṅ","ǫ","ƥ","ɋ","ř","ṡ","ť","ų","ṽ","ẇ","ẋ","ÿ","ȥ"
    ],
    "combining": [
        "\u0300","\u0301","\u0302","\u0303","\u0304","\u0306","\u0307","\u0308","\u030A","\u030B","\u0323","\u0324","\u0325","\u0361","\u035C"
    ],
    "symbols": [
        "₳","฿","₵","Đ","Ɇ","₣","₲","Ħ","ł","₪","₭","Ł","₥","₦","Ø","₽","Ɋ","Ɍ","§","Ŧ","Ʉ","Ѵ","Ɇ","Ӿ","Ɏ","Ɀ",
        "☆","✦","✶","✹","✺","✻","✼","✽","✾","✿","✶"
    ]
}
# Regexes for content the tool refuses to obfuscate (anti-spam guard).
# Deliberately easy to bypass -- the author states this is a deterrent,
# not a security boundary.
FORBIDDEN_PATTERNS = [
    r"@\w+",                                 # @mention handles
    r"\b[\w\.-]+@[\w\.-]+\.[a-zA-Z]{2,}\b",  # e-mail addresses
    r"https?://\S+",                         # explicit http(s) URLs
    r"www\.\S+",                             # schemeless www. URLs
    r"\+\d{6,}"                              # +international phone numbers
]
def contains_forbidden(text, patterns=None):
    """Return True if *text* matches any blocked pattern.

    Parameters:
        text: candidate input string.
        patterns: optional iterable of regex strings to test against;
            defaults to the module-level FORBIDDEN_PATTERNS (mentions,
            e-mails, URLs, phone numbers).

    Backward compatible: existing one-argument callers behave exactly
    as before.
    """
    if patterns is None:
        patterns = FORBIDDEN_PATTERNS
    return any(re.search(p, text) for p in patterns)
def random_choice(pool):
    """Return one element drawn uniformly at random from *pool*."""
    index = random.randrange(len(pool))
    return pool[index]
def apply_combining(s, density=0.15):
    """Scatter random Unicode combining marks over the letters of *s*.

    Every alphabetic character has a *density* chance of receiving one
    combining mark, and then a further 15% chance of a second mark
    stacked on top.  Non-letters pass through untouched.
    """
    pieces = []
    for letter in s:
        pieces.append(letter)
        if not letter.isalpha():
            continue
        if random.random() < density:
            pieces.append(random_choice(CHAR_POOLS["combining"]))
            if random.random() < 0.15:
                pieces.append(random_choice(CHAR_POOLS["combining"]))
    return "".join(pieces)
def obfuscate_normal(text):
    """Obfuscate *text* with homoglyphs and combining marks.

    Latin letters map position-for-position (a=0 .. z=25) into the
    upper/lower homoglyph pools, each with a 25% chance of an attached
    combining mark; a final apply_combining pass sprinkles more marks.
    Other characters pass through unchanged.

    Bug fix: digits used to be replaced by *random* digits, which
    destroyed the numeric content (dates, amounts) and defeated the
    human-readability goal.  They now map to the visually confusable
    fullwidth digit with the same value (e.g. "5" -> "５").
    """
    result = []
    for ch in text:
        if ch.isalpha():
            idx = (ord(ch.lower()) - ord('a')) % 26
            pool = CHAR_POOLS["upper"] if ch.isupper() else CHAR_POOLS["lower"]
            mapped = pool[idx % len(pool)]
            if random.random() < 0.25:
                mapped = mapped + random_choice(CHAR_POOLS["combining"])
            result.append(mapped)
        elif ch.isdigit():
            # unicodedata.digit is defined for every ch with isdigit()
            # True (including non-ASCII digits); default guards oddities.
            value = unicodedata.digit(ch, None)
            result.append(ch if value is None else chr(0xFF10 + value))
        else:
            result.append(ch)
    return apply_combining("".join(result), density=0.15)
def contains_non_latin(text):
    """Return True if *text* contains any alphabetic character outside
    the Latin script (or with no Unicode name at all).

    Non-alphabetic characters (digits, punctuation, whitespace) are
    ignored.  Fixes: removes an unused local, and narrows the broad
    ``except Exception`` to ``ValueError`` -- the only exception
    ``unicodedata.name`` raises for unnamed characters.
    """
    for ch in text:
        if not ch.isalpha():
            continue
        try:
            char_name = unicodedata.name(ch)
        except ValueError:
            # Unnamed/unassigned code point: reject rather than mis-map.
            return True
        if "LATIN" not in char_name:
            return True
    return False
import unicodedata
def main():
    """Interactive entry point: read one line from stdin, refuse
    spam-like or non-Latin input, otherwise print the obfuscated text."""
    random.seed()
    user_text = input("Enter text to obfuscate and then hit ENTER.\n")
    if contains_forbidden(user_text):
        print("Sorry, this cannot be converted. This is not a SPAM tool. Please do NOT use it as SPAM filter bypass")
        sys.exit(0)
    if contains_non_latin(user_text):
        print("Sorry, only Latin languages supported.")
        sys.exit(0)
    print(obfuscate_normal(user_text))
# Run the interactive prompt only when executed as a script, not on import.
if __name__ == "__main__":
    main()
What does it do? It converts text into a format that confuses bots (Example → Ёхąḿҏļẻ).
To use it run:
sudo apt update && sudo apt install python3
Then
nano obfuscator.py
And paste code there.
To run:
python3 obfuscator.py