Here are examples of the Python API requests.utils.requote_uri taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
64 Examples
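Before the project examples, a minimal sketch of what requote_uri does (the URLs here are illustrative and not taken from any project below): it percent-encodes characters that are unsafe in a URI while leaving valid, already-quoted sequences untouched.

import requests

# Unsafe characters such as spaces are percent-encoded ...
print(requests.utils.requote_uri("http://example.com/search?q=hello world"))
# http://example.com/search?q=hello%20world

# ... and a stray '%' is escaped, while existing escapes are preserved.
print(requests.utils.requote_uri("http://example.com/fiz?buz=%ppicture"))
# http://example.com/fiz?buz=%25ppicture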
3
Source : oauth.py
with MIT License
from chawel
def get_oauth_url(self):
"""
Returns authentication (OAuth 2) URI generated using provided parameters
:return: URI for authentication
:rtype: :py:class:`str`
"""
base_url = """{auth_url}/authorize?response_type=code&client_id={client_id}&redirect_uri={redirect_uri}"""
# Just in case requote to ensure correct url format
return requests.utils.requote_uri(base_url.format(auth_url=self._auth_url,
client_id=self._id,
redirect_uri=self._redirect_uri))
def fetch_oauth_code(self):
3
Source : function00.py
with GNU General Public License v2.0
from Clinton-Abraham
async def Search1337x(query: str):
async with aiohttp.ClientSession() as session:
async with session.get(requote_uri(API_1337x.format(query, Config.MAX_RESULTS))) as res:
return (await res.json())["results"] if ((await res.json()).get("results", None) is not None) else []
async def SearchYTS(query: str):
3
Source : function00.py
with GNU General Public License v2.0
from Clinton-Abraham
async def SearchYTS(query: str):
async with aiohttp.ClientSession() as session:
async with session.get(requote_uri(API_YTS.format(query, Config.MAX_RESULTS))) as res:
return (await res.json())["results"] if ((await res.json()).get("results", None) is not None) else []
async def SearchPirateBay(query: str):
3
Source : function00.py
with GNU General Public License v2.0
from Clinton-Abraham
async def SearchPirateBay(query: str):
async with aiohttp.ClientSession() as session:
async with session.get(requote_uri(API_PIRATEBAY.format(query, Config.MAX_RESULTS))) as res:
return (await res.json())["results"] if ((await res.json()).get("results", None) is not None) else []
async def SearchAnime(query: str):
3
Source : utils.py
with GNU General Public License v3.0
from cozpii
def translate(to_translate, to_language="en", from_language="auto"):
base_link = "http://translate.google.com/m?hl=%s&sl=%s&q=%s"
to_translate = requests.utils.requote_uri(to_translate)
link = base_link % (to_language, from_language, to_translate)
try:
result = requests.get(link)
except Exception as e:
print("Couldn't connect to translation server:", e)
return None
data = result.content
soup = bs(data, "lxml")
result = soup.find_all("div", "t0")
result = str(result[0])[26:-6]
return result
# Add the tokens in the tokens file
# Initialize the path variable with the path to the tokens file
def connect():
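Many of the examples in this listing follow the same pattern as translate above: a free-text value is interpolated into a URL template and the result is passed through requote_uri so that spaces and other unsafe characters are percent-encoded. A self-contained sketch of that pattern, using a made-up endpoint purely for illustration:

import requests

def build_search_url(query: str) -> str:
    # Hypothetical endpoint; only the quoting pattern matters here.
    base = "https://api.example.com/search?q={}"
    return requests.utils.requote_uri(base.format(query))

print(build_search_url("dune part two"))
# https://api.example.com/search?q=dune%20part%20two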
3
Source : main.py
with MIT License
from FayasNoushad
async def search(bot, update):
results = requests.get(API + requests.utils.requote_uri(update.query)).json()["result"][:50]
answers = []
for result in results:
answers.append(
InlineQueryResultPhoto(
title=update.query.capitalize(),
description=result,
caption="Made by @FayasNoushad",
photo_url=result
)
)
await update.answer(answers)
Bot.run()
3
Source : info.py
with MIT License
from FayasNoushad
async def get_command(bot, update):
movie = requote_uri(update.text.split(" ", 1)[1])
username = (await bot.get_me()).username
keyboard = [
InlineKeyboardButton(
text="Click here",
url=f"https://telegram.me/{username}?start={movie}"
)
]
await update.reply_text(
text=f"**Click the button below**",
reply_markup=InlineKeyboardMarkup([keyboard]),
disable_web_page_preview=True,
quote=True
)
@Client.on_message(filters.private & filters.text & ~filters.via_bot & ~filters.edited)
3
Source : info.py
with MIT License
from FayasNoushad
def get_movies(name):
movie_name = requote_uri(name)
movie_api = API + movie_name
r = requests.get(movie_api)
movies = r.json()
return movies
async def get_movie(bot, update, name, cb=False):
3
Source : main.py
with MIT License
from FayasNoushad
def pypi(query):
r = requests.get(API + requote_uri(query))
info = r.json()
return info
def pypi_text(query):
3
Source : remote.py
with MIT License
from frictionlessdata
def read_byte_stream_create(self):
source = requests.utils.requote_uri(self.file.source)
session = self.file.control.http_session
timeout = self.file.control.http_timeout
byte_stream = RemoteByteStream(source, session=session, timeout=timeout).open()
if self.file.control.http_preload:
buffer = io.BufferedRandom(io.BytesIO())
buffer.write(byte_stream.read())
buffer.seek(0)
byte_stream = buffer
return byte_stream
# Internal
class RemoteByteStream:
3
Source : aws.py
with MIT License
from frictionlessdata
def read_byte_stream_create(self):
boto3 = helpers.import_from_plugin("boto3", plugin="aws")
control = self.file.control
client = boto3.client("s3", endpoint_url=control.endpoint_url)
source = requests.utils.requote_uri(self.file.source)
parts = urlparse(source, allow_fragments=False)
response = client.get_object(Bucket=parts.netloc, Key=parts.path[1:])
# https://github.com/frictionlessdata/tabulator-py/issues/271
byte_stream = io.BufferedRandom(io.BytesIO())
byte_stream.write(response["Body"].read())
byte_stream.seek(0)
return byte_stream
# Internal
DEFAULT_ENDPOINT_URL = "https://s3.amazonaws.com"
3
Source : test_utils.py
with Apache License 2.0
from gethue
def test_requote_uri_with_unquoted_percents(uri, expected):
"""See: https://github.com/psf/requests/issues/2356"""
assert requote_uri(uri) == expected
@pytest.mark.parametrize(
3
Source : test_utils.py
with MIT License
from jest-community
def test_requote_uri_with_unquoted_percents(uri, expected):
"""See: https://github.com/requests/requests/issues/2356"""
assert requote_uri(uri) == expected
@pytest.mark.parametrize(
3
Source : test_requests.py
with Apache License 2.0
from lumanjiao
def test_requote_uri_with_unquoted_percents(self):
"""Ensure we handle unquoted percent signs in redirects.
See: https://github.com/kennethreitz/requests/issues/2356
"""
from requests.utils import requote_uri
bad_uri = 'http://example.com/fiz?buz=%ppicture'
quoted = 'http://example.com/fiz?buz=%25ppicture'
assert quoted == requote_uri(bad_uri)
def test_requote_uri_properly_requotes(self):
3
Source : test_requests.py
with Apache License 2.0
from lumanjiao
def test_requote_uri_properly_requotes(self):
"""Ensure requoting doesn't break expectations."""
from requests.utils import requote_uri
quoted = 'http://example.com/fiz?buz=%25ppicture'
assert quoted == requote_uri(quoted)
class TestMorselToCookieExpires(unittest.TestCase):
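The tests above exercise requote_uri's handling of percent signs; the parametrize decorators are truncated in this listing, but a standalone version built from the values shown in the unittest-style tests might look like this (a sketch, not the exact parameters used by either project):

import pytest
from requests.utils import requote_uri

@pytest.mark.parametrize(
    "uri, expected",
    [
        # An unquoted percent sign gets escaped ...
        ("http://example.com/fiz?buz=%ppicture", "http://example.com/fiz?buz=%25ppicture"),
        # ... while an already-quoted URI passes through unchanged.
        ("http://example.com/fiz?buz=%25ppicture", "http://example.com/fiz?buz=%25ppicture"),
    ],
)
def test_requote_uri_percent_handling(uri, expected):
    assert requote_uri(uri) == expected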
3
Source : atlatl.py
with GNU General Public License v3.0
from s4dhulabs
def get_payload(self, cmd):
return requote_uri(quote(
"__import__('os').system('{} >/tmp/.x');\
open('/tmp/.x').readlines()".format(cmd)
)
)
def load(self):
3
Source : avsupdaterepo.py
with MIT License
from theChaosCoder
def fetch_url_to_cache(url, name, tag_name, desc = None):
cache_path = os.path.join('dlcache', name + '_' + tag_name, os.path.basename(url))
url = requote_uri(url)
if not os.path.isfile(cache_path):
os.makedirs(os.path.split(cache_path)[0], exist_ok=True)
with urllib.request.urlopen(urllib.request.Request(url, method='HEAD')) as urlreq:
if not os.path.isfile(cache_path):
data = fetch_url(url, desc)
with open(cache_path, 'wb') as pl:
pl.write(data)
return cache_path
def list_archive_files(fn):
2
Source : upload_to_tg.py
with GNU Affero General Public License v3.0
from Gowtham0625
async def upload_to_gdrive(file_upload, message, messa_ge, g_id):
await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
del_it = await message.edit_text("🔊 Now Uploading to ☁️ Cloud !!")
#subprocess.Popen(('touch', 'rclone.conf'), stdout = subprocess.PIPE)
with open('rclone.conf', 'a', newline="\n", encoding = 'utf-8') as fole:
fole.write("[DRIVE]\n")
fole.write(f"{RCLONE_CONFIG}")
destination = f'{DESTINATION_FOLDER}'
if os.path.isfile(file_upload):
g_au = ['rclone', 'copy', '--config=/app/rclone.conf', f'/app/{file_upload}', 'DRIVE:'f'{destination}', '-v']
tmp = await asyncio.create_subprocess_exec(*g_au, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
pro, cess = await tmp.communicate()
LOGGER.info(pro.decode('utf-8'))
LOGGER.info(cess.decode('utf-8'))
gk_file = re.escape(file_upload)
LOGGER.info(gk_file)
with open('filter.txt', 'w+', encoding = 'utf-8') as filter:
print(f"+ {gk_file}\n- *", file=filter)
t_a_m = ['rclone', 'lsf', '--config=/app/rclone.conf', '-F', 'i', "--filter-from=/app/filter.txt", "--files-only", 'DRIVE:'f'{destination}']
gau_tam = await asyncio.create_subprocess_exec(*t_a_m, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
#os.remove("filter.txt")
gau, tam = await gau_tam.communicate()
LOGGER.info(gau)
gautam = gau.decode("utf-8")
LOGGER.info(gautam)
LOGGER.info(tam.decode('utf-8'))
#os.remove("filter.txt")
gauti = f"https://drive.google.com/file/d/{gautam}/view?usp=drivesdk"
gau_link = re.search("(?P<url>https?://[^\s]+)", gauti).group("url")
LOGGER.info(gau_link)
#indexurl = f"{INDEX_LINK}/{file_upload}"
#tam_link = requests.utils.requote_uri(indexurl)
gjay = size(os.path.getsize(file_upload))
LOGGER.info(gjay)
button = []
button.append([pyrogram.InlineKeyboardButton(text="Google Drive URL", url=f"{gau_link}")])
if INDEX_LINK:
indexurl = f"{INDEX_LINK}/{file_upload}"
tam_link = requests.utils.requote_uri(indexurl)
LOGGER.info(tam_link)
button.append([pyrogram.InlineKeyboardButton(text="☁️ Index Website URL ☁️", url=f"{tam_link}")])
button_markup = pyrogram.InlineKeyboardMarkup(button)
await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
await messa_ge.reply_text(f"🤖: {file_upload} has been Uploaded successfully to @AbirHasan2005's Cloud < a href='tg://user?id={g_id}'> < /a>\n📀 Size: {gjay} \n\nYou can get them on Index Website ☁️", reply_markup=button_markup)
#await message.edit_text(f"""🤖: {file_upload} has been Uploaded successfully to @AbirHasan2005's cloud 🤒\n\n☁️ Cloud URL: < a href="{gau_link}">FileLink < /a>\nℹ️ Direct URL: < a href="{tam_link}">IndexLink < /a>""")
os.remove(file_upload)
await del_it.delete()
else:
tt= os.path.join(destination, file_upload)
LOGGER.info(tt)
t_am = ['rclone', 'copy', '--config=/app/rclone.conf', f'/app/{file_upload}', 'DRIVE:'f'{tt}', '-v']
tmp = await asyncio.create_subprocess_exec(*t_am, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
pro, cess = await tmp.communicate()
LOGGER.info(pro.decode('utf-8'))
LOGGER.info(cess.decode('utf-8'))
g_file = re.escape(file_upload)
LOGGER.info(g_file)
with open('filter1.txt', 'w+', encoding = 'utf-8') as filter1:
print(f"+ {g_file}/\n- *", file=filter1)
g_a_u = ['rclone', 'lsf', '--config=/app/rclone.conf', '-F', 'i', "--filter-from=/app/filter1.txt", "--dirs-only", 'DRIVE:'f'{destination}']
gau_tam = await asyncio.create_subprocess_exec(*g_a_u, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
#os.remove("filter1.txt")
gau, tam = await gau_tam.communicate()
LOGGER.info(gau)
gautam = gau.decode("utf-8")
LOGGER.info(gautam)
LOGGER.info(tam.decode('utf-8'))
#os.remove("filter1.txt")
gautii = f"https://drive.google.com/folderview?id={gautam}"
gau_link = re.search("(?P<url>https?://[^\s]+)", gautii).group("url")
LOGGER.info(gau_link)
#indexurl = f"{INDEX_LINK}/{file_upload}/"
#tam_link = requests.utils.requote_uri(indexurl)
#print(tam_link)
gjay = size(getFolderSize(file_upload))
LOGGER.info(gjay)
button = []
button.append([pyrogram.InlineKeyboardButton(text="Google Drive URL", url=f"{gau_link}")])
if INDEX_LINK:
indexurl = f"{INDEX_LINK}/{file_upload}/"
tam_link = requests.utils.requote_uri(indexurl)
LOGGER.info(tam_link)
button.append([pyrogram.InlineKeyboardButton(text="☁️ Index Website URL ☁️", url=f"{tam_link}")])
button_markup = pyrogram.InlineKeyboardMarkup(button)
await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
await messa_ge.reply_text(f"🤖: Folder has been Uploaded successfully to {tt} in @AbirHasan2005's Cloud < a href='tg://user?id={g_id}'> < /a>\n📀 Size: {gjay} \n\nYou can get them on Index Website ☁️", reply_markup=button_markup)
#await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
#await messa_ge.reply_text(f"""🤖: Folder has been Uploaded successfully to {tt} in your cloud 🤒\n\n☁️ Cloud URL: < a href="{gau_link}">FolderLink < /a>\nℹ️ Index Url:. < a href="{tam_link}">IndexLink < /a>""")
shutil.rmtree(file_upload)
await del_it.delete()
#os.remove('rclone.conf')
#
async def upload_single_file(message, local_file_name, caption_str, from_user, edit_media):
2
Source : upload_to_tg.py
with GNU Affero General Public License v3.0
from prgofficial
async def upload_to_gdrive(file_upload, message, messa_ge, g_id):
await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
del_it = await message.edit_text("Uploading to ☁️ CLOUD ☁️")
#subprocess.Popen(('touch', 'rclone.conf'), stdout = subprocess.PIPE)
with open('rclone.conf', 'a', newline="\n", encoding = 'utf-8') as fole:
fole.write("[DRIVE]\n")
fole.write(f"{RCLONE_CONFIG}")
destination = f'{DESTINATION_FOLDER}'
if os.path.isfile(file_upload):
g_au = ['rclone', 'copy', '--config=/app/rclone.conf', f'/app/{file_upload}', 'DRIVE:'f'{destination}', '-vvv']
tmp = await asyncio.create_subprocess_exec(*g_au, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
pro, cess = await tmp.communicate()
LOGGER.info(pro.decode('utf-8'))
LOGGER.info(cess.decode('utf-8'))
gk_file = re.escape(file_upload)
LOGGER.info(gk_file)
with open('filter.txt', 'w+', encoding = 'utf-8') as filter:
print(f"+ {gk_file}\n- *", file=filter)
t_a_m = ['rclone', 'lsf', '--config=/app/rclone.conf', '-F', 'i', "--filter-from=/app/filter.txt", "--files-only", 'DRIVE:'f'{destination}']
gau_tam = await asyncio.create_subprocess_exec(*t_a_m, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
#os.remove("filter.txt")
gau, tam = await gau_tam.communicate()
LOGGER.info(gau)
gautam = gau.decode("utf-8")
LOGGER.info(gautam)
LOGGER.info(tam.decode('utf-8'))
#os.remove("filter.txt")
gauti = f"https://drive.google.com/file/d/{gautam}/view?usp=drivesdk"
gau_link = re.search("(?P<url>https?://[^\s]+)", gauti).group("url")
LOGGER.info(gau_link)
#indexurl = f"{INDEX_LINK}/{file_upload}"
#tam_link = requests.utils.requote_uri(indexurl)
prgs = size(os.path.getsize(file_upload))
LOGGER.info(prgs)
button = []
button.append([pyrogram.InlineKeyboardButton(text="☁️ G-Drive Link ☁️", url=f"{gau_link}")])
if INDEX_LINK:
indexurl = f"{INDEX_LINK}/{file_upload}"
tam_link = requests.utils.requote_uri(indexurl)
LOGGER.info(tam_link)
button.append([pyrogram.InlineKeyboardButton(text="ℹ️ Index Link ℹ️", url=f"{tam_link}")])
button_markup = pyrogram.InlineKeyboardMarkup(button)
await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
await messa_ge.reply_text(f"⭕️ **FILE NAME** : {file_upload}\n\n⭕️ **FILE SIZE** : {prgs}\n\n⭕️ **Your file has been Uploaded** 🥳\n\n⭕️ **©️ @prgofficial .**", reply_markup=button_markup)
#await message.edit_text(f"""🤖: {file_upload} has been Uploaded successfully to your cloud 🤒\n\n☁️ Cloud URL: < a href="{gau_link}">FileLink < /a>\nℹ️ Direct URL: < a href="{tam_link}">IndexLink < /a>""")
os.remove(file_upload)
await del_it.delete()
else:
tt= os.path.join(destination, file_upload)
LOGGER.info(tt)
t_am = ['rclone', 'copy', '--config=/app/rclone.conf', f'/app/{file_upload}', 'DRIVE:'f'{tt}', '-vvv']
tmp = await asyncio.create_subprocess_exec(*t_am, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
pro, cess = await tmp.communicate()
LOGGER.info(pro.decode('utf-8'))
LOGGER.info(cess.decode('utf-8'))
g_file = re.escape(file_upload)
LOGGER.info(g_file)
with open('filter1.txt', 'w+', encoding = 'utf-8') as filter1:
print(f"+ {g_file}/\n- *", file=filter1)
g_a_u = ['rclone', 'lsf', '--config=/app/rclone.conf', '-F', 'i', "--filter-from=/app/filter1.txt", "--dirs-only", 'DRIVE:'f'{destination}']
gau_tam = await asyncio.create_subprocess_exec(*g_a_u, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
#os.remove("filter1.txt")
gau, tam = await gau_tam.communicate()
LOGGER.info(gau)
gautam = gau.decode("utf-8")
LOGGER.info(gautam)
LOGGER.info(tam.decode('utf-8'))
#os.remove("filter1.txt")
gautii = f"https://drive.google.com/folderview?id={gautam}"
gau_link = re.search("(?P<url>https?://[^\s]+)", gautii).group("url")
LOGGER.info(gau_link)
#indexurl = f"{INDEX_LINK}/{file_upload}/"
#tam_link = requests.utils.requote_uri(indexurl)
#print(tam_link)
prgs = size(getFolderSize(file_upload))
LOGGER.info(prgs)
button = []
button.append([pyrogram.InlineKeyboardButton(text="☁️ G-Drive Link ☁️", url=f"{gau_link}")])
if INDEX_LINK:
indexurl = f"{INDEX_LINK}/{file_upload}/"
tam_link = requests.utils.requote_uri(indexurl)
LOGGER.info(tam_link)
button.append([pyrogram.InlineKeyboardButton(text="ℹ️ Index Link ℹ️", url=f"{tam_link}")])
button_markup = pyrogram.InlineKeyboardMarkup(button)
await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
await messa_ge.reply_text(f"🤖: Folder has been Uploaded successfully to {tt} in your Cloud < a href='tg://user?id={g_id}'>🤒 < /a>\n📀 Size: {prgs}", reply_markup=button_markup)
#await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
#await messa_ge.reply_text(f"""🤖: Folder has been Uploaded successfully to {tt} in your cloud 🤒\n\n☁️ Cloud URL: < a href="{gau_link}">FolderLink < /a>\nℹ️ Index Url:. < a href="{tam_link}">IndexLink < /a>""")
shutil.rmtree(file_upload)
await del_it.delete()
#os.remove('rclone.conf')
#
async def upload_single_file(message, local_file_name, caption_str, from_user, edit_media):
0
Source : covid.py
with GNU General Public License v2.0
from Aadhi000
def covid_info(country_name):
try:
r = requests.get(API + requote_uri(country_name.lower()))
info = r.json()
country = info['country'].capitalize()
active = info['active']
confirmed = info['confirmed']
deaths = info['deaths']
info_id = info['id']
last_update = info['last_update']
latitude = info['latitude']
longitude = info['longitude']
recovered = info['recovered']
covid_info = f"""--**𝙲𝙾𝚅𝙸𝙳 𝟷𝟿 𝙸𝙽𝙵𝙾𝚁𝙼𝙰𝚃𝙸𝙾𝙽**--
᚛› Country : `{country}`
᚛› Actived : `{active}`
᚛› Confirmed : `{confirmed}`
᚛› Deaths : `{deaths}`
᚛› ID : `{info_id}`
᚛› Last Update : `{last_update}`
᚛› Latitude : `{latitude}`
᚛› Longitude : `{longitude}`
᚛› Recovered : `{recovered}`"""
return covid_info
except Exception as error:
return error
0
Source : upload_to_tg.py
with GNU Affero General Public License v3.0
from alfianandaa
async def upload_to_gdrive(file_upload, message, messa_ge, g_id):
await asyncio.sleep(5)
await message.edit_text("Uploading...")
start_time = int(round(time.time() * 1))
subprocess.Popen(('touch', 'rclone.conf'), stdout=subprocess.PIPE)
with open('rclone.conf', 'a', newline="\n") as fole:
fole.write("[DRIVE]\n")
fole.write(f"{RCLONE_CONFIG}")
destination = f'{DESTINATION_FOLDER}'
if os.path.isfile(file_upload):
tmp = subprocess.Popen(['rclone',
'copy',
'--config=rclone.conf',
f'{file_upload}',
'DRIVE:'
f'{destination}',
'-v'],
stdout=subprocess.PIPE)
pro, cess = tmp.communicate()
gk_file = re.escape(file_upload)
print(gk_file)
with open('filter.txt', 'w+') as filter:
print(f"+ {gk_file}\n- *", file=filter)
process1 = subprocess.Popen(['rclone',
'lsf',
'--config=rclone.conf',
'-F',
'i',
"--filter-from=filter.txt",
"--files-only",
'DRIVE:'
f'{destination}'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# os.remove("filter.txt")
popi, popp = process1.communicate()
print(popi)
p = popi.decode("utf-8")
print(p)
# os.remove("filter.txt")
gauti = f"https://drive.google.com/file/d/{p}/view?usp=drivesdk"
gau_link = re.search(r"(?P<url>https?://[^\s]+)", gauti).group("url")
print(gau_link)
indexurl = f"{INDEX_LINK}/{file_upload}"
tam_link = requests.utils.requote_uri(indexurl)
#s_tr = '-'*40
gjay = size(os.path.getsize(file_upload))
print(gjay)
end_time = int(round(time.time() * 1))
m_s = (end_time - start_time)
await message.edit_text(f"""**Uploaded Successfully** __in {m_s}seconds__ \n\n < a href="{tam_link}">📄 {file_upload} < /a>({gjay})""")
os.remove(file_upload)
else:
tt = os.path.join(destination, file_upload)
print(tt)
tmp = subprocess.Popen(['rclone',
'copy',
'--config=rclone.conf',
f'{file_upload}',
'DRIVE:'
f'{tt}',
'-v'],
stdout=subprocess.PIPE)
pro, cess = tmp.communicate()
print(pro)
g_file = re.escape(file_upload)
print(g_file)
with open('filter1.txt', 'w+') as filter1:
print(f"+ {g_file}/\n- *", file=filter1)
process12 = subprocess.Popen(['rclone',
'lsf',
'--config=rclone.conf',
'-F',
'i',
"--filter-from=filter1.txt",
"--dirs-only",
'DRIVE:'
f'{destination}'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# os.remove("filter1.txt")
popie, popp = process12.communicate()
print(popie)
p = popie.decode("utf-8")
print(p)
# os.remove("filter1.txt")
gautii = f"https://drive.google.com/folderview?id={p}"
gau_link = re.search(r"(?P<url>https?://[^\s]+)", gautii).group("url")
print(gau_link)
indexurl = f"{INDEX_LINK}/{file_upload}/"
tam_link = requests.utils.requote_uri(indexurl)
gjay = size(os.path.getsize(file_upload))
print(gjay)
#s_tr = '-'*40
end_time = int(round(time.time() * 1))
m_s = (end_time - start_time)
await message.edit_text(f"""**Uploaded Successfully** __in {m_s}seconds__ \n\n < a href="{tam_link}">📁 {file_upload} < /a>({gjay})""")
#
async def upload_single_file(message, local_file_name, caption_str, from_user, edit_media):
0
Source : gdriveTools.py
with GNU General Public License v3.0
from AmineSoukara
def clone(self, link):
self.transferred_size = 0
try:
file_id = self.getIdFromUrl(link)
except (KeyError,IndexError):
msg = "Google drive ID could not be found in the provided link"
return msg, ""
msg = ""
LOGGER.info(f"File ID: {file_id}")
try:
meta = self.getFileMetadata(file_id)
if meta.get("mimeType") == self.__G_DRIVE_DIR_MIME_TYPE:
dir_id = self.create_directory(meta.get('name'), parent_id)
result = self.cloneFolder(meta.get('name'), meta.get('name'), meta.get('id'), dir_id)
msg += f'<b>📁 Filename :</b><code>{meta.get("name")}</code>\n<b>💾 Size :</b>{get_readable_file_size(self.transferred_size)}'
buttons = button_build.ButtonMaker()
buttons.buildbutton("⚡Drive Link⚡", self.__G_DRIVE_DIR_BASE_DOWNLOAD_URL.format(dir_id))
if INDEX_URL is not None:
url = requests.utils.requote_uri(f'{INDEX_URL}/{meta.get("name")}/')
buttons.buildbutton("💥Index Link💥", url)
if BUTTON_THREE_NAME is not None and BUTTON_THREE_URL is not None:
buttons.buildbutton(f"{BUTTON_THREE_NAME}", f"{BUTTON_THREE_URL}")
if BUTTON_FOUR_NAME is not None and BUTTON_FOUR_URL is not None:
buttons.buildbutton(f"{BUTTON_FOUR_NAME}", f"{BUTTON_FOUR_URL}")
if BUTTON_FIVE_NAME is not None and BUTTON_FIVE_URL is not None:
buttons.buildbutton(f"{BUTTON_FIVE_NAME}", f"{BUTTON_FIVE_URL}")
else:
file = self.copyFile(meta.get('id'), parent_id)
msg += f'<b>📁 Filename :</b><code>{file.get("name")}</code>'
buttons = button_build.ButtonMaker()
buttons.buildbutton("⚡Drive Link⚡", self.__G_DRIVE_BASE_DOWNLOAD_URL.format(file.get("id")))
try:
msg += f'\n<b>💾 Size :</b><code>{get_readable_file_size(int(meta.get("size")))}</code>'
except TypeError:
pass
if INDEX_URL is not None:
url = requests.utils.requote_uri(f'{INDEX_URL}/{file.get("name")}')
buttons.buildbutton("💥Index Link💥", url)
if BUTTON_THREE_NAME is not None and BUTTON_THREE_URL is not None:
buttons.buildbutton(f"{BUTTON_THREE_NAME}", f"{BUTTON_THREE_URL}")
if BUTTON_FOUR_NAME is not None and BUTTON_FOUR_URL is not None:
buttons.buildbutton(f"{BUTTON_FOUR_NAME}", f"{BUTTON_FOUR_URL}")
if BUTTON_FIVE_NAME is not None and BUTTON_FIVE_URL is not None:
buttons.buildbutton(f"{BUTTON_FIVE_NAME}", f"{BUTTON_FIVE_URL}")
except Exception as err:
if isinstance(err, RetryError):
LOGGER.info(f"Total Attempts: {err.last_attempt.attempt_number}")
err = err.last_attempt.exception()
err = str(err).replace('>', '').replace('<', '')
LOGGER.error(err)
return err, ""
return msg, InlineKeyboardMarkup(buttons.build_menu(2))
def cloneFolder(self, name, local_path, folder_id, parent_id):
0
Source : gdriveTools.py
with GNU General Public License v3.0
from AmineSoukara
def drive_list(self, fileName):
msg = ""
fileName = self.escapes(str(fileName))
# Create Search Query for API request.
query = f"'{parent_id}' in parents and (name contains '{fileName}')"
response = self.__service.files().list(supportsTeamDrives=True,
includeTeamDriveItems=True,
q=query,
spaces='drive',
pageSize=200,
fields='files(id, name, mimeType, size)',
orderBy='modifiedTime desc').execute()
content_count = 0
if response["files"]:
msg += f'<h4>Results : {fileName}</h4><br><br>'
for file in response.get('files', []):
if file.get('mimeType') == "application/vnd.google-apps.folder": # Detect Whether Current Entity is a Folder or File.
msg += f"⁍ < code>{file.get('name')} < br>(📁 Folder) < /code> < br>" \
f" < b> < a href='https://drive.google.com/drive/folders/{file.get('id')}'>Drive Link < /a> < /b>"
if INDEX_URL is not None:
url = requests.utils.requote_uri(f'{INDEX_URL}/{file.get("name")}/')
msg += f'<b>| <a href="{url}">Index Link</a></b>'
else:
msg += f"⁍ < code>{file.get('name')} < br>({get_readable_file_size(int(file.get('size')))})📄 < /code> < br>" \
f" < b> < a href='https://drive.google.com/uc?id={file.get('id')}&export=download'>Drive Link < /a> < /b>"
if INDEX_URL is not None:
url = requests.utils.requote_uri(f'{INDEX_URL}/{file.get("name")}')
msg += f'<b>| <a href="{url}">Index Link</a></b>'
msg += '<br><br>'
content_count += 1
if content_count == TELEGRAPHLIMIT :
self.telegraph_content.append(msg)
msg = ""
content_count = 0
if msg != '':
self.telegraph_content.append(msg)
if len(self.telegraph_content) == 0:
return "No Result Found :(", None
for content in self.telegraph_content :
self.path.append(Telegraph(access_token=telegraph_token).create_page(
title = 'Mirror Bot Search',
author_name='Mirror Bot',
author_url='https://github.com/magneto261290/magneto-python-aria',
html_content=content
)['path'])
self.num_of_path = len(self.path)
if self.num_of_path > 1:
self.edit_telegraph()
msg = f" < b>✴️ Search Results ✴️\n For {fileName} < /b>"
buttons = button_build.ButtonMaker()
buttons.buildbutton("Click Here", f"https://telegra.ph/{self.path[0]}")
return msg, InlineKeyboardMarkup(buttons.build_menu(1))
else :
return '', ''
def drive_slist(self, fileName):
0
Source : gdriveTools.py
with GNU General Public License v3.0
from AmineSoukara
def drive_slist(self, fileName):
msg = ""
fileName = self.escapes(str(fileName))
# Create Search Query for API request.
query = f"'{parent_id}' in parents and (name contains '{fileName}')"
response = self.__service.files().list(supportsTeamDrives=True,
includeTeamDriveItems=True,
q=query,
spaces='drive',
pageSize=20,
fields='files(id, name, mimeType, size)',
orderBy='modifiedTime desc').execute()
for file in response.get('files', []):
if file.get(
'mimeType') == "application/vnd.google-apps.folder": # Detect Whether Current Entity is a Folder or File.
msg += f"⁍ < a href='https://drive.google.com/drive/folders/{file.get('id')}'>{file.get('name')}" \
f" < /a> (folder)"
if INDEX_URL is not None:
url = requests.utils.requote_uri(f'{INDEX_URL}/{file.get("name")}/')
msg += f' | <a href="{url}"> Index URL</a>'
else:
msg += f"⁍ < a href='https://drive.google.com/uc?id={file.get('id')}" \
f"&export=download'>{file.get('name')} < /a> ({get_readable_file_size(int(file.get('size')))})"
if INDEX_URL is not None:
url = requests.utils.requote_uri(f'{INDEX_URL}/{file.get("name")}')
msg += f' | <a href="{url}"> Index URL</a>'
msg += '\n'
return msg
0
Source : mirror.py
with GNU General Public License v3.0
from AmineSoukara
def onUploadComplete(self, link: str):
with download_dict_lock:
msg = f'<b>📁 Filename :</b><code>{download_dict[self.uid].name()}</code>\n<b>💾 Size :</b><code>{download_dict[self.uid].size()}</code>'
buttons = button_build.ButtonMaker()
buttons.buildbutton("⚡Drive Link⚡", link)
LOGGER.info(f'Done Uploading {download_dict[self.uid].name()}')
if INDEX_URL is not None:
share_url = requests.utils.requote_uri(f'{INDEX_URL}/{download_dict[self.uid].name()}')
if os.path.isdir(f'{DOWNLOAD_DIR}/{self.uid}/{download_dict[self.uid].name()}'):
share_url += '/'
buttons.buildbutton("💥Index Link💥", share_url)
if BUTTON_THREE_NAME is not None and BUTTON_THREE_URL is not None:
buttons.buildbutton(f"{BUTTON_THREE_NAME}", f"{BUTTON_THREE_URL}")
if BUTTON_FOUR_NAME is not None and BUTTON_FOUR_URL is not None:
buttons.buildbutton(f"{BUTTON_FOUR_NAME}", f"{BUTTON_FOUR_URL}")
if BUTTON_FIVE_NAME is not None and BUTTON_FIVE_URL is not None:
buttons.buildbutton(f"{BUTTON_FIVE_NAME}", f"{BUTTON_FIVE_URL}")
if self.message.from_user.username:
uname = f"@{self.message.from_user.username}"
else:
uname = f'<a href="tg://user?id={self.message.from_user.id}">{self.message.from_user.first_name}</a>'
if uname is not None:
msg += f'\n\ncc : {uname}'
try:
fs_utils.clean_download(download_dict[self.uid].path())
except FileNotFoundError:
pass
del download_dict[self.uid]
count = len(download_dict)
sendMarkup(msg, self.bot, self.update, InlineKeyboardMarkup(buttons.build_menu(2)))
if count == 0:
self.clean()
else:
update_all_messages()
def onUploadError(self, error):
0
Source : utils.py
with GNU Affero General Public License v3.0
from bloxlink
async def fetch(self, url, method="GET", params=None, headers=None, body=None, text=False, json=True, bytes=False, raise_on_failure=True, retry=HTTP_RETRY_LIMIT, timeout=20):
params = params or {}
headers = headers or {}
new_json = {}
proxied = False
if text or bytes:
json = False
if PROXY_URL and "roblox.com" in url:
old_url = url
new_json["url"] = url
new_json["data"] = body or {}
url = PROXY_URL
proxied = True
method = "POST"
if RELEASE == "LOCAL":
print(f"{old_url} -> {url}")
else:
if RELEASE == "LOCAL":
print(f"Making request to {url} with method {method}")
new_json = body
old_url = url
url = requote_uri(url)
for k, v in params.items():
if isinstance(v, bool):
params[k] = "true" if v else "false"
try:
async with a_timeout(timeout): # I noticed sometimes the aiohttp timeout parameter doesn't work. This is added as a backup.
async with self.session.request(method, url, json=new_json, params=params, headers=headers, timeout=timeout) as response:
if proxied:
try:
response_json = await response.json()
except aiohttp.client_exceptions.ContentTypeError:
raise RobloxAPIError
response_body = response_json["req"]["body"]
response_status = response_json["req"]["status"]
response.status = response_status
if not isinstance(response_body, dict):
try:
response_body_json = json_.loads(response_body)
except:
pass
else:
response_body = response_body_json
else:
response_status = response.status
response_body = None
if raise_on_failure:
if response_status == 503:
raise RobloxDown
elif response_status == 404:
raise RobloxNotFound
elif response_status >= 400:
if proxied:
print(old_url, response_body, flush=True)
else:
print(old_url, await response.text(), flush=True)
raise RobloxAPIError
if json:
if not proxied:
try:
response_body = await response.json()
except aiohttp.client_exceptions.ContentTypeError:
raise RobloxAPIError
if isinstance(response_body, dict):
return response_body, response
else:
return {}, response
if text:
if proxied:
return str(response_body), response
text = await response.text()
return text, response
elif json:
if proxied:
if not isinstance(response_body, dict):
print("Roblox API Error: ", old_url, type(response_body), response_body, flush=True)
if raise_on_failure:
raise RobloxAPIError
return response_body, response
try:
json = await response.json()
except aiohttp.client_exceptions.ContentTypeError:
print(old_url, await response.text(), flush=True)
raise RobloxAPIError
return json, response
elif bytes:
return await response.read(), response
return response
except asyncio.TimeoutError:
print(f"URL {old_url} timed out", flush=True)
raise RobloxDown
async def get_prefix(self, guild=None, trello_board=None):
0
Source : gdriveTools.py
with GNU General Public License v3.0
from carolyn99240
def clone(self, link):
self.transferred_size = 0
try:
file_id = self.getIdFromUrl(link)
except (KeyError,IndexError):
msg = "Google drive ID could not be found in the provided link"
return msg, ""
msg = ""
LOGGER.info(f"File ID: {file_id}")
try:
meta = self.getFileMetadata(file_id)
if meta.get("mimeType") == self.__G_DRIVE_DIR_MIME_TYPE:
dir_id = self.create_directory(meta.get('name'), parent_id)
result = self.cloneFolder(meta.get('name'), meta.get('name'), meta.get('id'), dir_id)
msg += f'<b>Filename :</b><code>{meta.get("name")}</code>\n<b>Size :</b>{get_readable_file_size(self.transferred_size)}'
buttons = button_build.ButtonMaker()
buttons.buildbutton("⚡Drive Link⚡", self.__G_DRIVE_DIR_BASE_DOWNLOAD_URL.format(dir_id))
if INDEX_URL is not None:
url = requests.utils.requote_uri(f'{INDEX_URL}/{meta.get("name")}/')
buttons.buildbutton("💥Index Link💥", url)
if BUTTON_THREE_NAME is not None and BUTTON_THREE_URL is not None:
buttons.buildbutton(f"{BUTTON_THREE_NAME}", f"{BUTTON_THREE_URL}")
if BUTTON_FOUR_NAME is not None and BUTTON_FOUR_URL is not None:
buttons.buildbutton(f"{BUTTON_FOUR_NAME}", f"{BUTTON_FOUR_URL}")
if BUTTON_FIVE_NAME is not None and BUTTON_FIVE_URL is not None:
buttons.buildbutton(f"{BUTTON_FIVE_NAME}", f"{BUTTON_FIVE_URL}")
else:
file = self.copyFile(meta.get('id'), parent_id)
msg += f'<b>Filename :</b><code>{file.get("name")}</code>'
buttons = button_build.ButtonMaker()
buttons.buildbutton("⚡Drive Link⚡", self.__G_DRIVE_BASE_DOWNLOAD_URL.format(file.get("id")))
try:
msg += f'\n<b>Size :</b><code>{get_readable_file_size(int(meta.get("size")))}</code>'
except TypeError:
pass
if INDEX_URL is not None:
url = requests.utils.requote_uri(f'{INDEX_URL}/{file.get("name")}')
buttons.buildbutton("💥Index Link💥", url)
if BUTTON_THREE_NAME is not None and BUTTON_THREE_URL is not None:
buttons.buildbutton(f"{BUTTON_THREE_NAME}", f"{BUTTON_THREE_URL}")
if BUTTON_FOUR_NAME is not None and BUTTON_FOUR_URL is not None:
buttons.buildbutton(f"{BUTTON_FOUR_NAME}", f"{BUTTON_FOUR_URL}")
if BUTTON_FIVE_NAME is not None and BUTTON_FIVE_URL is not None:
buttons.buildbutton(f"{BUTTON_FIVE_NAME}", f"{BUTTON_FIVE_URL}")
except Exception as err:
if isinstance(err, RetryError):
LOGGER.info(f"Total Attempts: {err.last_attempt.attempt_number}")
err = err.last_attempt.exception()
err = str(err).replace('>', '').replace('<', '')
LOGGER.error(err)
return err, ""
return msg, InlineKeyboardMarkup(buttons.build_menu(2))
def cloneFolder(self, name, local_path, folder_id, parent_id):
0
Source : gdriveTools.py
with GNU General Public License v3.0
from carolyn99240
with GNU General Public License v3.0
from carolyn99240
def drive_list(self, fileName):
msg = ""
fileName = self.escapes(str(fileName))
# Create Search Query for API request.
query = f"'{parent_id}' in parents and (name contains '{fileName}')"
response = self.__service.files().list(supportsTeamDrives=True,
includeTeamDriveItems=True,
q=query,
spaces='drive',
pageSize=200,
fields='files(id, name, mimeType, size)',
orderBy='modifiedTime desc').execute()
content_count = 0
if response["files"]:
msg += f'<h4>Results : {fileName}</h4><br><br>'
for file in response.get('files', []):
if file.get('mimeType') == "application/vnd.google-apps.folder": # Detect Whether Current Entity is a Folder or File.
msg += f"⁍ < code>{file.get('name')} < br>(folder📁) < /code> < br>" \
f" < b> < a href='https://drive.google.com/drive/folders/{file.get('id')}'>Drive Link < /a> < /b>"
if INDEX_URL is not None:
url = requests.utils.requote_uri(f'{INDEX_URL}/{file.get("name")}/')
msg += f'<b>| <a href="{url}">Index Link</a></b>'
else:
msg += f"⁍ < code>{file.get('name')} < br>({get_readable_file_size(int(file.get('size')))})📄 < /code> < br>" \
f" < b> < a href='https://drive.google.com/uc?id={file.get('id')}&export=download'>Drive Link < /a> < /b>"
if INDEX_URL is not None:
url = requests.utils.requote_uri(f'{INDEX_URL}/{file.get("name")}')
msg += f'<b>| <a href="{url}">Index Link</a></b>'
msg += '<br><br>'
content_count += 1
if content_count == TELEGRAPHLIMIT :
self.telegraph_content.append(msg)
msg = ""
content_count = 0
if msg != '':
self.telegraph_content.append(msg)
if len(self.telegraph_content) == 0:
return "No Result Found :(", None
for content in self.telegraph_content :
self.path.append(Telegraph(access_token=telegraph_token).create_page(
title = 'Mirror Bot Search',
author_name='Mirror Bot',
author_url='https://github.com/magneto261290/magneto-python-aria',
html_content=content
)['path'])
self.num_of_path = len(self.path)
if self.num_of_path > 1:
self.edit_telegraph()
msg = f" < b>Search Results For {fileName} 👇 < /b>"
buttons = button_build.ButtonMaker()
buttons.buildbutton("HERE", f"https://telegra.ph/{self.path[0]}")
return msg, InlineKeyboardMarkup(buttons.build_menu(1))
else :
return '', ''
def drive_slist(self, fileName):
0
Source : mirror.py
with GNU General Public License v3.0
from carolyn99240
def onUploadComplete(self, link: str):
with download_dict_lock:
msg = f'<b>Filename :</b><code>{download_dict[self.uid].name()}</code>\n<b>Size :</b><code>{download_dict[self.uid].size()}</code>'
buttons = button_build.ButtonMaker()
buttons.buildbutton("☁️ Drive Link ☁️", link)
LOGGER.info(f'Done Uploading {download_dict[self.uid].name()}')
if INDEX_URL is not None:
share_url = requests.utils.requote_uri(f'{INDEX_URL}/{download_dict[self.uid].name()}')
if os.path.isdir(f'{DOWNLOAD_DIR}/{self.uid}/{download_dict[self.uid].name()}'):
share_url += '/'
buttons.buildbutton("⚡ Index Link ⚡", share_url)
if BUTTON_THREE_NAME is not None and BUTTON_THREE_URL is not None:
buttons.buildbutton(f"{BUTTON_THREE_NAME}", f"{BUTTON_THREE_URL}")
if BUTTON_FOUR_NAME is not None and BUTTON_FOUR_URL is not None:
buttons.buildbutton(f"{BUTTON_FOUR_NAME}", f"{BUTTON_FOUR_URL}")
if BUTTON_FIVE_NAME is not None and BUTTON_FIVE_URL is not None:
buttons.buildbutton(f"{BUTTON_FIVE_NAME}", f"{BUTTON_FIVE_URL}")
if self.message.from_user.username:
uname = f"@{self.message.from_user.username}"
else:
uname = f'<a href="tg://user?id={self.message.from_user.id}">{self.message.from_user.first_name}</a>'
if uname is not None:
msg += f'\n\ncc : {uname}'
try:
fs_utils.clean_download(download_dict[self.uid].path())
except FileNotFoundError:
pass
del download_dict[self.uid]
count = len(download_dict)
sendMarkup(msg, self.bot, self.update, InlineKeyboardMarkup(buttons.build_menu(2)))
if count == 0:
self.clean()
else:
update_all_messages()
def onUploadError(self, error):
0
Source : function00.py
with GNU General Public License v2.0
from Clinton-Abraham
async def SearchAnime(query: str):
async with aiohttp.ClientSession() as session:
async with session.get(requote_uri(API_ANIME.format(query, Config.MAX_RESULTS))) as res:
return (await res.json())["results"] if ((await res.json()).get("results", None) is not None) else []
0
Source : cloneHelper.py
with GNU Affero General Public License v3.0
from DareAbijth
async def link_gen_size(self):
if self.name is not None:
_drive = ""
if self.name == self.filee:
_flag = "--files-only"
_up = "File"
_ui = ""
else:
_flag = "--dirs-only"
_up = "Folder"
_drive = "folderba"
_ui = "/"
g_name = re.escape(self.name)
LOGGER.info(g_name)
destination = f"{DESTINATION_FOLDER}"
with open("filter1.txt", "w+", encoding="utf-8") as filter1:
print(f"+ {g_name}{_ui}\n- *", file=filter1)
g_a_u = [
"rclone",
"lsf",
"--config=./rclone.conf",
"-F",
"i",
"--filter-from=./filter1.txt",
f"{_flag}",
f"{self.dname}:{destination}",
]
LOGGER.info(g_a_u)
gau_tam = await asyncio.create_subprocess_exec(
*g_a_u, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
gau, tam = await gau_tam.communicate()
LOGGER.info(gau)
gautam = gau.decode("utf-8")
LOGGER.info(gautam)
LOGGER.info(tam.decode("utf-8"))
if _drive == "folderba":
gautii = f"https://drive.google.com/folderview?id={gautam}"
else:
gautii = f"https://drive.google.com/file/d/{gautam}/view?usp=drivesdk"
LOGGER.info(gautii)
gau_link = re.search("(?P<url>https?://[^\s]+)", gautii).group("url")
LOGGER.info(gau_link)
button = []
button.append(
[
pyrogram.InlineKeyboardButton(
text="☁️ 𝙂𝘿𝙧𝙞𝙫𝙚-𝙐𝙧𝙡 ☁️", url=f"{gau_link}"
)
]
)
if INDEX_LINK:
if _flag == "--files-only":
indexurl = f"{INDEX_LINK}/{self.name}"
else:
indexurl = f"{INDEX_LINK}/{self.name}/"
tam_link = requests.utils.requote_uri(indexurl)
LOGGER.info(tam_link)
button.append(
[
pyrogram.InlineKeyboardButton(
text="🎦 𝙄𝙣𝙙𝙚𝙭-𝙐𝙧𝙡 🎦", url=f"{tam_link}"
)
]
)
button_markup = pyrogram.InlineKeyboardMarkup(button)
msg = await self.lsg.edit_text(
f"🤖: {_up} 𝙘𝙡𝙤𝙣𝙚𝙙 𝙨𝙪𝙘𝙘𝙚𝙨𝙨𝙛𝙪𝙡𝙡𝙮 𝙞𝙣 𝙮𝙤𝙪𝙧 𝘾𝙡𝙤𝙪𝙙 \n < a href='tg://user?id={self.u_id}'>#Cloned To Team Drive ✅ < /a>\
\n📀 Info: Calculating...",
reply_markup=button_markup,
parse_mode="html",
)
g_cmd = [
"rclone",
"size",
"--config=./rclone.conf",
f"{self.dname}:{destination}/{self.name}",
]
LOGGER.info(g_cmd)
gaut_am = await asyncio.create_subprocess_exec(
*g_cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
gaut, am = await gaut_am.communicate()
g_autam = gaut.decode("utf-8")
LOGGER.info(g_autam)
LOGGER.info(am.decode("utf-8"))
await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
await msg.edit_text(
f"🤖: {_up} 𝙘𝙡𝙤𝙣𝙚𝙙 𝙨𝙪𝙘𝙘𝙚𝙨𝙨𝙛𝙪𝙡𝙡𝙮 𝙞𝙣 𝙮𝙤𝙪𝙧 𝘾𝙡𝙤𝙪𝙙 \n < a href='tg://user?id={self.u_id}'>#Cloned To Team Drive ✅ < /a>\
\n📀 Info:\n{g_autam}",
reply_markup=button_markup,
parse_mode="html",
)
async def gcl(self):
0
Source : upload_to_tg.py
with GNU Affero General Public License v3.0
from DareAbijth
with GNU Affero General Public License v3.0
from DareAbijth
async def upload_to_gdrive(file_upload, message, messa_ge, g_id):
await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
del_it = await message.edit_text(
f" < a href='tg://user?id={g_id}'>📤 < /a>👻𝙉𝙤𝙬 𝙐𝙥𝙡𝙤𝙖𝙙𝙞𝙣𝙜 𝙩𝙤 ☁️ 𝙂𝘿𝙍𝙄𝙑𝙀!!🔁"
)
if not os.path.exists("rclone.conf"):
with open("rclone.conf", "w+", newline="\n", encoding="utf-8") as fole:
fole.write(f"{RCLONE_CONFIG}")
if os.path.exists("rclone.conf"):
with open("rclone.conf", "r+") as file:
con = file.read()
gUP = re.findall("\[(.*)\]", con)[0]
LOGGER.info(gUP)
destination = f"{DESTINATION_FOLDER}"
file_upload = str(Path(file_upload).resolve())
LOGGER.info(file_upload)
if os.path.isfile(file_upload):
g_au = [
"rclone",
"copy",
"--config=rclone.conf",
f"{file_upload}",
f"{gUP}:{destination}",
"-v",
]
LOGGER.info(g_au)
tmp = await asyncio.create_subprocess_exec(
*g_au, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
pro, cess = await tmp.communicate()
LOGGER.info(pro.decode("utf-8"))
LOGGER.info(cess.decode("utf-8"))
gk_file = re.escape(os.path.basename(file_upload))
LOGGER.info(gk_file)
with open("filter.txt", "w+", encoding="utf-8") as filter:
print(f"+ {gk_file}\n- *", file=filter)
t_a_m = [
"rclone",
"lsf",
"--config=rclone.conf",
"-F",
"i",
"--filter-from=filter.txt",
"--files-only",
f"{gUP}:{destination}",
]
gau_tam = await asyncio.create_subprocess_exec(
*t_a_m, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
# os.remove("filter.txt")
gau, tam = await gau_tam.communicate()
gautam = gau.decode().strip()
LOGGER.info(gau.decode())
LOGGER.info(tam.decode())
# os.remove("filter.txt")
gauti = f"https://drive.google.com/file/d/{gautam}/view?usp=drivesdk"
gjay = size(os.path.getsize(file_upload))
button = []
button.append(
[pyrogram.InlineKeyboardButton(
text="☁️ 𝙂𝘿𝙧𝙞𝙫𝙚-𝙐𝙧𝙡 ☁️", url=f"{gauti}")]
)
if INDEX_LINK:
indexurl = f"{INDEX_LINK}/{os.path.basename(file_upload)}"
tam_link = requests.utils.requote_uri(indexurl)
LOGGER.info(tam_link)
button.append(
[
pyrogram.InlineKeyboardButton(
text="🎦 𝙄𝙣𝙙𝙚𝙭-𝙐𝙧𝙡 🎦", url=f"{tam_link}"
)
]
)
button_markup = pyrogram.InlineKeyboardMarkup(button)
await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
await messa_ge.reply_text(
f"🗃️𝙁𝙞𝙡𝙚𝙣𝙖𝙢𝙚 : `{os.path.basename(file_upload)}`\n📀𝙎𝙞𝙯𝙚: {gjay}\n < a href='tg://user?id={g_id}'>#Uploaded To Team Drive ✅ < /a>",
reply_markup=button_markup,
)
os.remove(file_upload)
await del_it.delete()
else:
tt = os.path.join(destination, os.path.basename(file_upload))
LOGGER.info(tt)
t_am = [
"rclone",
"copy",
"--config=rclone.conf",
f"{file_upload}",
f"{gUP}:{tt}",
"-v",
]
LOGGER.info(t_am)
tmp = await asyncio.create_subprocess_exec(
*t_am, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
pro, cess = await tmp.communicate()
LOGGER.info(pro.decode("utf-8"))
LOGGER.info(cess.decode("utf-8"))
g_file = re.escape(os.path.basename(file_upload))
LOGGER.info(g_file)
with open("filter1.txt", "w+", encoding="utf-8") as filter1:
print(f"+ {g_file}/\n- *", file=filter1)
g_a_u = [
"rclone",
"lsf",
"--config=rclone.conf",
"-F",
"i",
"--filter-from=filter1.txt",
"--dirs-only",
f"{gUP}:{destination}",
]
gau_tam = await asyncio.create_subprocess_exec(
*g_a_u, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
# os.remove("filter1.txt")
gau, tam = await gau_tam.communicate()
gautam = gau.decode("utf-8")
LOGGER.info(gautam)
LOGGER.info(tam.decode("utf-8"))
# os.remove("filter1.txt")
gautii = f"https://drive.google.com/folderview?id={gautam}"
gjay = size(getFolderSize(file_upload))
LOGGER.info(gjay)
button = []
button.append(
[pyrogram.InlineKeyboardButton(
text="☁️ 𝙂𝘿𝙧𝙞𝙫𝙚-𝙐𝙧𝙡 ☁️", url=f"{gautii}")]
)
if INDEX_LINK:
indexurl = f"{INDEX_LINK}/{os.path.basename(file_upload)}/"
tam_link = requests.utils.requote_uri(indexurl)
LOGGER.info(tam_link)
button.append(
[
pyrogram.InlineKeyboardButton(
text="🎦 𝙄𝙣𝙙𝙚𝙭-𝙐𝙧𝙡 🎦", url=f"{tam_link}"
)
]
)
button_markup = pyrogram.InlineKeyboardMarkup(button)
await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
await messa_ge.reply_text(
f"🗃️𝙁𝙞𝙡𝙚𝙣𝙖𝙢𝙚 : `{os.path.basename(file_upload)}` \n < a href='tg://user?id={g_id}'>name of the uploader.. < /a>\n📀𝙎𝙞𝙯𝙚: {gjay} \n < b>#Uploaded To Team Drive ✅ < /b>",
reply_markup=button_markup,
)
shutil.rmtree(file_upload)
await del_it.delete()
#
async def upload_single_file(
0
Source : gdriveTools.py
with MIT License
from death-angel-141
def clone(self, link):
self.transferred_size = 0
try:
file_id = self.getIdFromUrl(link)
except (KeyError,IndexError):
msg = "Google drive ID could not be found in the provided link"
return msg
msg = ""
LOGGER.info(f"File ID: {file_id}")
try:
meta = self.__service.files().get(supportsAllDrives=True, fileId=file_id,
fields="name,id,mimeType,size").execute()
except Exception as e:
return f"{str(e).replace('>', '').replace(' < ', '')}"
if meta.get("mimeType") == self.__G_DRIVE_DIR_MIME_TYPE:
dir_id = self.create_directory(meta.get('name'), parent_id)
try:
result = self.cloneFolder(meta.get('name'), meta.get('name'), meta.get('id'), dir_id)
except Exception as e:
if isinstance(e, RetryError):
LOGGER.info(f"Total Attempts: {e.last_attempt.attempt_number}")
err = e.last_attempt.exception()
else:
err = str(e).replace('>', '').replace('<', '')
LOGGER.error(err)
return err
msg += f' < a href="{self.__G_DRIVE_DIR_BASE_DOWNLOAD_URL.format(dir_id)}">{meta.get("name")} < /a>' \
f' ({get_readable_file_size(self.transferred_size)})'
if INDEX_URL is not None:
url = requests.utils.requote_uri(f'{INDEX_URL}/{meta.get("name")}/')
msg += f' | <a href="{url}"> Index URL</a>'
else:
try:
file = self.copyFile(meta.get('id'), parent_id)
except Exception as e:
if isinstance(e, RetryError):
LOGGER.info(f"Total Attempts: {e.last_attempt.attempt_number}")
err = e.last_attempt.exception()
else:
err = str(e).replace('>', '').replace('<', '')
LOGGER.error(err)
return err
msg += f' < a href="{self.__G_DRIVE_BASE_DOWNLOAD_URL.format(file.get("id"))}">{file.get("name")} < /a>'
try:
msg += f' ({get_readable_file_size(int(meta.get("size")))}) '
if INDEX_URL is not None:
url = requests.utils.requote_uri(f'{INDEX_URL}/{file.get("name")}')
msg += f' | <a href="{url}"> Index URL</a>'
except TypeError:
pass
return msg
def cloneFolder(self, name, local_path, folder_id, parent_id):
0
Source : gdriveTools.py
with MIT License
from death-angel-141
with MIT License
from death-angel-141
def drive_list(self, fileName):
msg = ""
# Create Search Query for API request.
query = f"'{parent_id}' in parents and (name contains '{fileName}')"
response = self.__service.files().list(supportsTeamDrives=True,
includeTeamDriveItems=True,
q=query,
spaces='drive',
pageSize=20,
fields='files(id, name, mimeType, size)',
orderBy='modifiedTime desc').execute()
for file in response.get('files', []):
if file.get(
'mimeType') == "application/vnd.google-apps.folder": # Detect Whether Current Entity is a Folder or File.
msg += f"⁍ < a href='https://drive.google.com/drive/folders/{file.get('id')}'>{file.get('name')}" \
f" < /a> (folder)"
if INDEX_URL is not None:
url = requests.utils.requote_uri(f'{INDEX_URL}/{file.get("name")}/')
msg += f' | <a href="{url}"> Index URL</a>'
else:
msg += f"⁍ < a href='https://drive.google.com/uc?id={file.get('id')}" \
f"&export=download'>{file.get('name')} < /a> ({get_readable_file_size(int(file.get('size')))})"
if INDEX_URL is not None:
url = requests.utils.requote_uri(f'{INDEX_URL}/{file.get("name")}')
msg += f' | <a href="{url}"> Index URL</a>'
msg += '\n'
return msg
0
Source : RepoFilter.py
with BSD 3-Clause "New" or "Revised" License
from DecomPy
def get_results(self, date, page):
"""
Makes a single request to the GitHub API for a page with results matching the search criteria.
:param date: creation date filter, because GitHub only allows 1000 results per search.
:type: str
:param page: Which page of results should be fetched.
:type: int
:return: void
"""
language_string = ""
if self.language:
language_string = "language:%s" % "C"
url = "https://api.github.com/search/repositories?q=%s%s+created:%s&page=%d" % (
requests.utils.requote_uri(self.search), language_string, date, page)
response = None
try:
# get the api request to prepare downloading the zips with authentication
if self.username is not None and self.password is not None:
response = requests.get(url, auth=(self.username, self.password))
else:
response = requests.get(url)
except Exception as e:
print("Getting url, error", e)
pass
try:
# test for 403
if response.status_code == 403:
# get time that we need to wait and wait for that time.
print("uh oh, rate limited!")
# wait their time if found
limit = response.headers
if "X-RateLimit-Reset" in limit:
# parses time to wait in seconds
wait = int(limit["X-RateLimit-Reset"]) - datetime.today().timestamp()
time.sleep(wait + 1)
else:
time.sleep(120) # wait 2 minutes then try again.
json_content = json.loads(response.content)
return json_content["items"]
except Exception as e:
print("JSON web navigator error", e)
return None
def offline_results(self, filename, date, start_page, end_page):
0
Source : main.py
with MIT License
from FayasNoushad
with MIT License
from FayasNoushad
def google(query):
r = requests.get(API + requote_uri(query))
informations = r.json()["results"][:50]
results = []
for info in informations:
text = f"**Title:** `{info['title']}`"
text += f"\n**Description:** `{info['description']}`"
text += f"\n\nMade by @FayasNoushad"
results.append(
{
"title": info['title'],
"description": info['description'],
"text": text,
"link": info['link']
}
)
return results
Bot.run()
0
Source : gdriveTools.py
with GNU General Public License v3.0
from fruitpunchsamurai2
with GNU General Public License v3.0
from fruitpunchsamurai2
def clone(self, link):
self.transferred_size = 0
try:
file_id = self.getIdFromUrl(link)
except (KeyError, IndexError):
msg = "Google drive ID could not be found in the provided link"
return msg
msg = ""
LOGGER.info(f"File ID: {file_id}")
try:
meta = self.__service.files().get(supportsAllDrives=True, fileId=file_id,
fields="name,id,mimeType,size").execute()
except Exception as e:
return f"{str(e).replace('>', '').replace(' < ', '')}"
if meta.get("mimeType") == self.__G_DRIVE_DIR_MIME_TYPE:
dir_id = self.create_directory(meta.get('name'), parent_id)
try:
result = self.cloneFolder(meta.get('name'), meta.get('name'), meta.get('id'), dir_id)
except Exception as e:
if isinstance(e, RetryError):
LOGGER.info(f"Total Attempts: {e.last_attempt.attempt_number}")
err = e.last_attempt.exception()
else:
err = str(e).replace('>', '').replace('<', '')
LOGGER.error(err)
return err
msg += f' < a href="{self.__G_DRIVE_DIR_BASE_DOWNLOAD_URL.format(dir_id)}">{meta.get("name")} < /a>' \
f' ({get_readable_file_size(self.transferred_size)})'
if INDEX_URL is not None:
url = requests.utils.requote_uri(f'{INDEX_URL}/{meta.get("name")}/')
msg += f' | <a href="{url}"> Index URL</a>'
else:
try:
file = self.copyFile(meta.get('id'), parent_id)
except Exception as e:
if isinstance(e, RetryError):
LOGGER.info(f"Total Attempts: {e.last_attempt.attempt_number}")
err = e.last_attempt.exception()
else:
err = str(e).replace('>', '').replace('<', '')
LOGGER.error(err)
return err
msg += f' < a href="{self.__G_DRIVE_BASE_DOWNLOAD_URL.format(file.get("id"))}">{file.get("name")} < /a>'
try:
msg += f' ({get_readable_file_size(int(meta.get("size")))}) '
if INDEX_URL is not None:
url = requests.utils.requote_uri(f'{INDEX_URL}/{file.get("name")}')
msg += f' | <a href="{url}"> Index URL</a>'
except TypeError:
pass
return msg
def cloneFolder(self, name, local_path, folder_id, parent_id):
0
Source : mirror.py
with GNU General Public License v3.0
from fruitpunchsamurai2
with GNU General Public License v3.0
from fruitpunchsamurai2
def onUploadComplete(self, link: str):
with download_dict_lock:
msg = f' < a href="{link}">{download_dict[self.uid].name()} < /a> ({download_dict[self.uid].size()})'
LOGGER.info(f'Done Uploading {download_dict[self.uid].name()}')
if INDEX_URL is not None:
share_url = requests.utils.requote_uri(f'{INDEX_URL}/{download_dict[self.uid].name()}')
if os.path.isdir(f'{DOWNLOAD_DIR}/{self.uid}/{download_dict[self.uid].name()}'):
share_url += '/'
msg += f'\n\n Shareable link: <a href="{share_url}">here</a>'
if self.message.from_user.username:
uname = f"@{self.message.from_user.username}"
else:
uname = f'<a href="tg://user?id={self.message.from_user.id}">{self.message.from_user.first_name}</a>'
if uname is not None:
msg += f'\n\ncc : {uname}'
try:
fs_utils.clean_download(download_dict[self.uid].path())
except FileNotFoundError:
pass
del download_dict[self.uid]
count = len(download_dict)
sendMessage(msg, self.bot, self.update)
if count == 0:
self.clean()
else:
update_all_messages()
def onUploadError(self, error):
0
Source : cloneHelper.py
with GNU Affero General Public License v3.0
from Gowtham0625
with GNU Affero General Public License v3.0
from Gowtham0625
async def link_gen_size(self):
if self.name is not None:
_drive = ""
if self.name == self.filee:
_flag = "--files-only"
_up = "File"
_ui = ""
else:
_flag = "--dirs-only"
_up = "Folder"
_drive = "folderba"
_ui = "/"
g_name = re.escape(self.name)
LOGGER.info(g_name)
destination = f'{DESTINATION_FOLDER}'
with open(
'filter1.txt',
'w+',
encoding= 'utf-8'
) as filter1:
print(f"+ {g_name}{_ui}\n- *",
file=filter1
)
g_a_u = [
'rclone',
'lsf',
'--config=./rclone.conf',
'-F',
'i',
"--filter-from=./filter1.txt",
f"{_flag}",
f'DRIVE:{destination}'
]
LOGGER.info(g_a_u)
gau_tam = await asyncio.create_subprocess_exec(
*g_a_u,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE
)
gau, tam = await gau_tam.communicate()
LOGGER.info(gau)
gautam = gau.decode("utf-8")
LOGGER.info(gautam)
LOGGER.info(tam.decode('utf-8'))
if _drive == "folderba":
gautii = f"https://drive.google.com/folderview?id={gautam}"
else:
gautii = f"https://drive.google.com/file/d/{gautam}/view?usp=drivesdk"
LOGGER.info(gautii)
gau_link = re.search("(?P<url>https?://[^\s]+)", gautii).group("url")
LOGGER.info(gau_link)
button = []
button.append(
[
pyrogram.InlineKeyboardButton(
text="☁️ GDrive URL ☁️",
url=f"{gau_link}"
)
]
)
if INDEX_LINK:
if _flag == "--files-only":
indexurl = f"{INDEX_LINK}/{self.name}"
else:
indexurl = f"{INDEX_LINK}/{self.name}/"
tam_link = requests.utils.requote_uri(indexurl)
LOGGER.info(tam_link)
button.append([pyrogram.InlineKeyboardButton(text="ℹ️ Index URL ℹ️", url=f"{tam_link}")])
button_markup = pyrogram.InlineKeyboardMarkup(button)
msg = await self.lsg.edit_text(
f"🤖: {_up} Cloned successfully in @AbirHasan2005's Cloud < a href='tg://user?id={self.u_id}'>🤒 < /a>\
\n📀 Info: Calculating ...",
reply_markup=button_markup,
parse_mode="html"
)
g_cmd = [
'rclone',
'size',
'--config=rclone.conf',
f'DRIVE:{destination}/{self.name}'
]
LOGGER.info(g_cmd)
gaut_am = await asyncio.create_subprocess_exec(
*g_cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE
)
gaut, am = await gaut_am.communicate()
g_autam = gaut.decode("utf-8")
LOGGER.info(g_autam)
LOGGER.info(am.decode('utf-8'))
await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
await msg.edit_text(
f"🤖: {_up} Cloned successfully in @AbirHasan2005's Cloud < a href='tg://user?id={self.u_id}'>🤒 < /a>\
\n📀 Info:\n{g_autam}",
reply_markup=button_markup,
parse_mode="html"
)
async def gcl(self):
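Note that requote_uri is applied to the complete index URL rather than to a single path segment. Unlike urllib.parse.quote, which by default also escapes characters such as ':' and '?', requote_uri keeps the URL delimiters intact, so the scheme, the path slashes and any query string survive. A small comparison sketch (the host name is invented):

from urllib.parse import quote
from requests.utils import requote_uri

link = "https://index.example.workers.dev/0:/Some Folder/?a=view"  # hypothetical index link

print(requote_uri(link))
# https://index.example.workers.dev/0:/Some%20Folder/?a=view

print(quote(link))
# https%3A//index.example.workers.dev/0%3A/Some%20Folder/%3Fa%3Dview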
0
Source : client.py
with Apache License 2.0
from IBM
def get(self, restype=None, resid=None, path=None, params={}, endpoint=None, url=None, return_full_response=False):
if url is None:
url = build_url(self.api_url, restype, resid, path, endpoint)
url = requote_uri(url)
url = url.replace("[", "%5B")
url = url.replace("]", "%5D")
logging.info('\n\n')
logging.info('GET {}'.format(url))
# return json.loads(self.conn.get(url, params=params).content)
resp = self.conn.get(url, params=params)
logging.info("{} {}".format(resp.status_code,
requests.status_codes._codes[resp.status_code][0]))
logging.info('\n')
if resp.content:
response_json = resp.json()
logging.info('\n')
# Commenting this line to reduce the xml file size
#logging.info(json.dumps(response_json, sort_keys=True, indent=4, separators=(',', ': ')))
return resp.json() if not return_full_response else resp
def diag_get(self, restype=None, resid=None, path=None, params={}, endpoint=None, url=None, outfile=None):
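The two replace() calls after requote_uri are there because '[' and ']' are in requote_uri's safe set and are therefore never escaped by it; this client apparently needs them encoded, so it patches them in manually. A short sketch of the same pattern against a made-up endpoint:

from requests.utils import requote_uri

url = requote_uri("https://spp.example.com/api/site?name=prod [primary]")  # hypothetical URL
print(url)
# https://spp.example.com/api/site?name=prod%20[primary]   (brackets left alone)
print(url.replace("[", "%5B").replace("]", "%5D"))
# https://spp.example.com/api/site?name=prod%20%5Bprimary%5D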
0
Source : client.py
with Apache License 2.0
from IBM
def diag_get(self, restype=None, resid=None, path=None, params={}, endpoint=None, url=None, outfile=None):
if url is None:
url = build_url(self.api_url, restype, resid, path, endpoint)
url = requote_uri(url)
url = url.replace("[", "%5B")
url = url.replace("]", "%5D")
logging.info('\n\n')
logging.info('GET {}'.format(url))
# return json.loads(self.conn.get(url, params=params).content)
resp = self.conn.get(url, params=params)
default_filename = re.findall(
'filename=(.+)', resp.headers['Content-Disposition'])[0]
if not outfile:
if not default_filename:
raise Exception(
"Couldn't get the file name to save the contents.")
outfile = os.path.join(tempfile.mkdtemp(), default_filename)
with open(outfile, 'wb') as fd:
fd.write(resp.content)
return outfile
def stream_get(self, restype=None, resid=None, path=None, params={}, endpoint=None, url=None, outfile=None):
0
Source : client.py
with Apache License 2.0
from IBM
def stream_get(self, restype=None, resid=None, path=None, params={}, endpoint=None, url=None, outfile=None):
if url is None:
url = build_url(self.api_url, restype, resid, path, endpoint)
url = requote_uri(url)
url = url.replace("[", "%5B")
url = url.replace("]", "%5D")
r = self.conn.get(url, params=params)
# The response header Content-Disposition contains default file name
# Content-Disposition: attachment; filename=log_1490030341274.zip
default_filename = re.findall(
'filename=(.+)', r.headers['Content-Disposition'])[0]
if not outfile:
if not default_filename:
raise Exception(
"Couldn't get the file name to save the contents.")
outfile = os.path.join(tempfile.mkdtemp(), default_filename)
with open(outfile, 'wb') as fd:
for chunk in r.iter_content(chunk_size=64*1024):
fd.write(chunk)
return outfile
def delete(self, restype=None, resid=None, path=None, params={}, endpoint=None, url=None):
0
Source : client.py
with Apache License 2.0
from IBM
def delete(self, restype=None, resid=None, path=None, params={}, endpoint=None, url=None):
if url is None:
url = build_url(self.api_url, restype, resid, path, endpoint)
url = requote_uri(url)
url = url.replace("[", "%5B")
url = url.replace("]", "%5D")
logging.info('\n\n')
logging.info('DELETE {}'.format(url))
resp = self.conn.delete(url, params=params)
logging.info("{} {}".format(resp.status_code,
requests.status_codes._codes[resp.status_code][0]))
logging.info('\n')
if resp.content:
response_json = resp.json()
logging.info('\n')
# Commenting this line to reduce the xml file size
#logging.info(json.dumps(response_json, sort_keys=True, indent=4, separators=(',', ': ')))
# return json.loads(resp.content) if resp.content else None
return resp.json() if resp.content else None
def post(self, restype=None, resid=None, path=None, data={}, params={}, endpoint=None, url=None):
0
Source : client.py
with Apache License 2.0
from IBM
def post(self, restype=None, resid=None, path=None, data={}, params={}, endpoint=None, url=None):
if url is None:
url = build_url(self.api_url, restype, resid, path, endpoint)
url = requote_uri(url)
url = url.replace("[", "%5B")
url = url.replace("]", "%5D")
logging.info('\n\n')
logging.info('POST {}'.format(url))
r = self.conn.post(url, json=data, params=params)
logging.info("{} {}".format(
r.status_code, requests.status_codes._codes[r.status_code][0]))
logging.info('\n')
if r.content:
response_json = r.json()
logging.info('\n')
# Commenting this line to reduce the xml file size
#logging.info(json.dumps(response_json, sort_keys=True, indent=4, separators=(',', ': ')))
if r.content:
return r.json()
return {}
def put(self, restype=None, resid=None, path=None, data={}, params={}, endpoint=None, url=None):
0
Source : client.py
with Apache License 2.0
from IBM
def put(self, restype=None, resid=None, path=None, data={}, params={}, endpoint=None, url=None):
if url is None:
url = build_url(self.api_url, restype, resid, path, endpoint)
url = requote_uri(url)
url = url.replace("[", "%5B")
url = url.replace("]", "%5D")
logging.info('\n\n')
logging.info('PUT {}'.format(url))
r = self.conn.put(url, json=data, params=params)
logging.info("{} {}".format(
r.status_code, requests.status_codes._codes[r.status_code][0]))
logging.info('\n')
if r.content:
response_json = r.json()
logging.info('\n')
# Commenting this line to reduce the xml file size
#logging.info(json.dumps(response_json, sort_keys=True, indent=4, separators=(',', ': ')))
if r.content:
return r.json()
return {}
class SppAPI(object):
0
Source : export-waf-policies-to-json.py
with MIT License
from imperva
def run():
mx_host = CONFIG["mx"]["endpoint"]
session_id = ss.login(mx_host, CONFIG["mx"]["username"], CONFIG["mx"]["password"])
# Iterate through each policy and pull out normalized list of datasets, ipGroups, and signatures
for policy_name in sourcePolicies:
policy_attr = sourcePolicies[policy_name]
policy_type = policy_attr["policy_type"]
if policy_type in ss.policyMapping:
policy_resource = ss.policyMapping[policy_type]
#print(ss.policyMapping[policyAttr["policyType"]])
logging.warning("Retrieving policy_type \""+policy_type+"\" policy_name \""+policy_name+"\" from MX - REQUEST: \nGET /conf/policies/security/"+ss.policyMapping[policy_type]+"/"+policy_name)
response = ss.makeCall(mx_host, session_id, "/conf/policies/security/"+ss.policyMapping[policy_type]+"/"+requote_uri(policy_name))
if response.status_code==404:
policy_attr["isok"] = False
else:
policyObj = response.json()
ALLPOLICIES[policy_name] = {"name":policy_name,"type":policy_type,"obj":policyObj}
policy_attr["policy_obj"] = policyObj
policy_attr["isok"] = True
logging.warning("RESPONSE: \n"+str(policyObj))
# No API call for Anti-scraping, Network Protocol Validation, ATO Cloud Protection, OCSP Protocol Validation, ATO Dictionary Protection, Bot Mitigation, Cookie Signing Validation, or web worm policies
# firewallPolicies
# httpProtocolPolicies
# http2ProtocolPolicies
# webCorrelationPolicies
# snippetInjectionPolicies
# webApplicationSignaturesPolicies - signatures in predicates and exceptions
# httpProtocolSignaturesPolicies
# snippetInjectionPolicies
# streamSignaturesPolicies
# webApplicationSignaturesPolicies
# webProfilePolicies
# check for rules->ipGroup in firewallPolicies
if "rules" in policyObj:
for rule in policyObj["rules"]:
if "ipGroup" in rule:
if rule["ipGroup"] not in ss.ignoreADCIpGroups:
# print("Capturing IPGroup \"" + rule["ipGroup"] + "\" for policy " + policy_name)
logging.warning("Capturing IPGroup \"" + rule["ipGroup"] + "\" for policy " + policy_name)
IPGROUPS[rule["ipGroup"]] = False
else:
# print("Ignoring IPGroup \"" + rule["ipGroup"] + "\" for policy " + policy_name)
logging.warning("Ignoring IPGroup \"" + rule["ipGroup"] + "\" for policy " + policy_name)
# IPGROUPS[ipGroup] = ss.getIPGroup(AUTH["ENDPOINT"], primary_session_id, ipGroup)
# check for exceptions->predicates->ipGroups in httpProtocolPolicies, http2ProtocolPolicies, webCorrelationPolicies, snippetInjectionPolicies
if "exceptions" in policyObj:
for exception in policyObj["exceptions"]:
if "predicates" in exception:
for predicate in exception["predicates"]:
if "ipGroups" in predicate:
for ipGroup in predicate["ipGroups"]:
if ipGroup not in ss.ignoreADCIpGroups:
# print("Capturing IPGroup \"" + ipGroup + "\" for policy " + policy_name)
logging.warning("Capturing IPGroup \"" + ipGroup + "\" for policy " + policy_name)
IPGROUPS[ipGroup] = False
else:
# print("Ignoring IPGroup \"" + ipGroup + "\" for policy " + policy_name)
logging.warning("Ignoring IPGroup \"" + ipGroup + "\" for policy " + policy_name)
# check matchCriteria - webApplicationCustomPolicies, webServiceCustomPolicies
if "matchCriteria" in policyObj:
for mc in policyObj["matchCriteria"]:
# matchCriteria->lookupDatasetSearch->searchInLookupDataset
# matchCriteria->enrichmentData->searchInLookupDataset
if mc["type"] == "lookupDatasetSearch" or mc["type"] == "enrichmentData":
if "searchInLookupDataset" in mc:
for dataset in mc["searchInLookupDataset"]:
# print("Capturing lookupDatasetSearch dataset \"" + dataset + "\" for policy " + policy_name)
logging.warning("Capturing enrichmentData searchInLookupDataset dataset \"" + dataset + "\" for policy " + policy_name)
DATASETS[dataset] = False
if "lookupDatasetSearch" in mc:
for dataset in mc["lookupDatasetSearch"]:
# print("Capturing lookupDatasetSearch dataset \"" + dataset + "\" for policy " + policy_name)
logging.warning("Capturing lookupDatasetSearch dataset \"" + dataset + "\" for policy " + policy_name)
DATASETS[dataset] = False
# DATASETS[dataset] = ss.getDataset(AUTH["ENDPOINT"], primary_session_id, dataset)
# matchCriteria->datasetAttributeLookup[]->searchInLookupDataset
elif mc["type"] == "datasetAttributeLookup":
for dataset in mc["searchInLookupDataset"]:
if dataset not in ss.ignoreADCDatasets:
# print("Capturing searchInLookupDataset dataset \"" + dataset + "\" for policy " + policy_name)
logging.warning("Capturing searchInLookupDataset dataset \"" + dataset + "\" for policy " + policy_name)
DATASETS[dataset] = False
else:
# print("Ignoring dataset \"" + dataset + "\" for policy " + policy_name)
logging.warning("Capturing dataset \"" + dataset + "\" for policy " + policy_name)
# DATASETS[dataset] = ss.getDataset(AUTH["ENDPOINT"], primary_session_id, dataset)
# logging.warning("Retrieving \""+dataset+"\" dataset for policy "+policy_name)
# matchCriteria->datasetAttributeLookup->lookupDataset
if dataset not in ss.ignoreADCDatasets:
# print("Capturing lookupDataset dataset \"" + mc["lookupDataset"] + "\" for policy " + policy_name)
logging.warning("Capturing lookupDataset dataset \"" + mc["lookupDataset"] + "\" for policy " + policy_name)
DATASETS[mc["lookupDataset"]] = False
else:
# print("Ignoring lookupDataset dataset \"" + mc["lookupDataset"] + "\" for policy " + policy_name)
logging.warning("Ignoring lookupDataset dataset \"" + mc["lookupDataset"] + "\" for policy " + policy_name)
# DATASETS[mc["lookupDataset"]] = ss.getDataset(AUTH["ENDPOINT"], primary_session_id, mc["lookupDataset"])
# logging.warning("Retrieving \"" + mc["lookupDataset"] + "\" dataset for policy " + policy_name)
elif mc["type"] == "signatures":
# sourcePolicies[policy_name]["isok"] = False
for signature in mc["signatures"]:
policy_attr["isok"] = False
SIGNATURES[signature["name"]] = False
logging.warning("Retrieving \""+signature["name"]+"\" signature for policy "+policy_name)
# print(mc["type"])
# matchCriteria->sourceIpAddresses[]
# matchCriteria->proxyIpAddresses[]
elif mc["type"] == "sourceIpAddresses" or mc["type"] == "proxyIpAddresses":
for ipGroup in mc["ipGroups"]:
if ipGroup not in ss.ignoreADCIpGroups:
# print("Capturing sourceIpAddresses ipGroup \"" + ipGroup + "\" for policy " + policy_name)
logging.warning("Capturing sourceIpAddresses ipGroup \"" + ipGroup + "\" for policy " + policy_name)
IPGROUPS[ipGroup] = False
else:
# print("Ignoring sourceIpAddresses ipGroup \"" + ipGroup + "\" for policy " + policy_name)
logging.warning("Ignoring sourceIpAddresses ipGroup \"" + ipGroup + "\" for policy " + policy_name)
# logging.warning("Retrieving IPGroup ("+ipGroup+") for policy " + policy_name)
# IPGROUPS[ipGroup] = ss.getIPGroup(AUTH["ENDPOINT"], primary_session_id, ipGroup)
else:
policy_attr["isok"] = False
print("Unsupported policy type \"" + policy_type + "\", skipping policy policy \"" + policy_name + "\"")
logging.warning("Unsupported policy type \"" + policy_type + "\", skipping policy policy \"" + policy_name + "\"")
# load normalized list of datasets
for dataset in DATASETS:
logging.warning("Retrieving \"" + dataset + "\" dataset")
DATASETS[dataset] = ss.getDataset(mx_host, session_id, dataset)
# load normalized list of ipGroups
for ipGroup in IPGROUPS:
IPGROUPS[ipGroup] = ss.getIPGroup(mx_host, session_id, ipGroup)
# signatures are not supported at this time, no method of retrieving list of signatures from system stream signatures
# Export each to disk in json format
os.makedirs("export/datasets",exist_ok = True)
for dataset_name in DATASETS:
ss.WriteFile("export/datasets/"+dataset_name.replace("/","_")+".json", json.dumps({"name":dataset_name,"obj":DATASETS[dataset_name]}))
os.makedirs("export/signatures",exist_ok = True)
for signatures_name in SIGNATURES:
ss.WriteFile("export/signatures/"+signatures_name.replace("/","_")+".json", json.dumps({"name":signatures_name,"obj":SIGNATURES[signatures_name]}))
os.makedirs("export/ipgroups",exist_ok = True)
for ipgroup_name in IPGROUPS:
ss.WriteFile("export/ipgroups/"+ipgroup_name.replace("/","_")+".json", json.dumps({"name":ipgroup_name,"obj":IPGROUPS[ipgroup_name]}))
os.makedirs("export/policies",exist_ok = True)
for policy_name in ALLPOLICIES:
ss.WriteFile("export/policies/"+policy_name.replace("/","_")+".json", json.dumps(ALLPOLICIES[policy_name]))
if __name__ == '__main__':
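Because requote_uri never escapes '/', the call above only works cleanly when each policy name is a single path segment: a name containing spaces becomes %20, but a name containing a literal '/' would look like two path segments to the server. A hedged sketch (the policy name is invented) showing both cases, with urllib.parse.quote used for the segment-only variant:

from urllib.parse import quote
from requests.utils import requote_uri

policy_name = "Default Web Application Policy"  # hypothetical
print(requote_uri("/conf/policies/security/webApplicationCustomPolicies/" + policy_name))
# /conf/policies/security/webApplicationCustomPolicies/Default%20Web%20Application%20Policy

# if a name could contain '/', quote just that segment instead of the whole path
print(quote("Reports/Archive Policy", safe=""))
# Reports%2FArchive%20Policy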
0
Source : gDrive.py
with MIT License
from jagrit007
def clone(self, link, status, ignoreList=[]):
self.transferred_size = 0
try:
file_id = self.getIdFromUrl(link)
except (KeyError,IndexError):
msg = "Google drive ID could not be found in the provided link"
return msg
msg = ""
LOGGER.info(f"File ID: {file_id}")
try:
meta = self.__service.files().get(supportsAllDrives=True, fileId=file_id,
fields="name,id,mimeType,size").execute()
dest_meta = self.__service.files().get(supportsAllDrives=True, fileId=self.gparentid,
fields="name,id,size").execute()
status.SetMainFolder(meta.get('name'), self.__G_DRIVE_DIR_BASE_DOWNLOAD_URL.format(meta.get('id')))
status.SetDestinationFolder(dest_meta.get('name'), self.__G_DRIVE_DIR_BASE_DOWNLOAD_URL.format(dest_meta.get('id')))
except Exception as e:
return f"{str(e).replace('>', '').replace(' < ', '')}"
if meta.get("mimeType") == self.__G_DRIVE_DIR_MIME_TYPE:
dir_id = self.check_folder_exists(meta.get('name'), self.gparentid)
if not dir_id:
dir_id = self.create_directory(meta.get('name'), self.gparentid)
try:
self.cloneFolder(meta.get('name'), meta.get('name'), meta.get('id'), dir_id, status, ignoreList)
except Exception as e:
if isinstance(e, RetryError):
LOGGER.info(f"Total Attempts: {e.last_attempt.attempt_number}")
err = e.last_attempt.exception()
else:
err = str(e).replace('>', '').replace('<', '')
LOGGER.error(err)
return err
status.set_status(True)
msg += f' <a href="{self.__G_DRIVE_DIR_BASE_DOWNLOAD_URL.format(dir_id)}">{meta.get("name")}</a>' \
f' ({get_readable_file_size(self.transferred_size)})'
if INDEX_URL:
url = requests.utils.requote_uri(f'{INDEX_URL}/{meta.get("name")}/')
msg += f' | <a href="{url}"> Index URL </a>'
else:
try:
file = self.check_file_exists(meta.get('id'), self.gparentid)
if file:
status.checkFileExist(True)
if not file:
status.checkFileExist(False)
file = self.copyFile(meta.get('id'), self.gparentid, status)
except Exception as e:
if isinstance(e, RetryError):
LOGGER.info(f"Total Attempts: {e.last_attempt.attempt_number}")
err = e.last_attempt.exception()
else:
err = str(e).replace('>', '').replace('<', '')
LOGGER.error(err)
return err
msg += f' <a href="{self.__G_DRIVE_BASE_DOWNLOAD_URL.format(file.get("id"))}">{file.get("name")}</a>'
try:
msg += f' ({get_readable_file_size(int(meta.get("size")))}) '
if INDEX_URL is not None:
url = requests.utils.requote_uri(f'{INDEX_URL}/{file.get("name")}')
msg += f' | <a href="{url}"> Index URL </a>'
except TypeError:
pass
return msg
def cloneFolder(self, name, local_path, folder_id, parent_id, status, ignoreList=[]):
0
Source : gdriveTools.py
with GNU General Public License v3.0
from Jigarvarma2005
def upload(self, file_path: str):
url = None
if Config.USE_SERVICE_ACCOUNTS:
self.service_account_count = len(os.listdir("accounts"))
file_name = pathlib.PurePath(file_path).name
LOGGER.info("Uploading File: " + file_path)
self.start_time = time.time()
self.updater = setInterval(self.update_interval, self._on_upload_progress)
if os.path.isfile(file_path):
try:
mime_type = self.get_mime_type(file_path)
link = self.upload_file(file_path, file_name, mime_type, Config.parent_id)
if link is None:
raise Exception('Upload has been manually cancelled')
LOGGER.info("Uploaded To G-Drive: " + file_path)
if Config.INDEX_URL is not None:
url = requests.utils.requote_uri(f'{Config.INDEX_URL}/{file_name}')
except Exception as e:
if isinstance(e, RetryError):
LOGGER.info(f"Total Attempts: {e.last_attempt.attempt_number}")
err = e.last_attempt.exception()
else:
err = e
LOGGER.error(err)
return
finally:
self.updater.cancel()
else:
try:
dir_id = self.create_directory(os.path.basename(os.path.abspath(file_name)), Config.parent_id)
result = self.upload_dir(file_path, dir_id)
if result is None:
raise Exception('Upload has been manually cancelled!')
LOGGER.info("Uploaded To G-Drive: " + file_name)
link = f"https://drive.google.com/folderview?id={dir_id}"
except Exception as e:
if isinstance(e, RetryError):
LOGGER.info(f"Total Attempts: {e.last_attempt.attempt_number}")
err = e.last_attempt.exception()
else:
err = e
LOGGER.error(err)
self.__listener.onUploadError(str(err))
return
finally:
self.updater.cancel()
LOGGER.info(download_dict)
LOGGER.info("Deleting downloaded file/folder..")
return link,url
@retry(wait=wait_exponential(multiplier=2, min=3, max=6), stop=stop_after_attempt(5),
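As in the other bots, the index link is produced by requoting INDEX_URL plus the raw file name. requote_uri percent-encodes non-ASCII characters as UTF-8 escapes, so Unicode file names remain valid inside the generated HTML anchor. A minimal sketch with invented names:

from requests.utils import requote_uri

INDEX_URL = "https://index.example.workers.dev"  # hypothetical
file_name = "película [2021].mkv"                # hypothetical upload name

print(requote_uri(f"{INDEX_URL}/{file_name}"))
# https://index.example.workers.dev/pel%C3%ADcula%20[2021].mkv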
0
Source : gdriveTools.py
with GNU General Public License v3.0
from Jigarvarma2005
def clone(self, link):
self.transferred_size = 0
try:
file_id = self.getIdFromUrl(link)
except (KeyError,IndexError):
msg = "Google drive ID could not be found in the provided link"
return msg
msg = ""
LOGGER.info(f"File ID: {file_id}")
try:
meta = self.getFileMetadata(file_id)
if meta.get("mimeType") == self.__G_DRIVE_DIR_MIME_TYPE:
dir_id = self.create_directory(meta.get('name'), Config.parent_id)
result = self.cloneFolder(meta.get('name'), meta.get('name'), meta.get('id'), dir_id)
msg += f' <a href="{self.__G_DRIVE_DIR_BASE_DOWNLOAD_URL.format(dir_id)}">{meta.get("name")}</a>' \
f' ({self.get_readable_file_size(self.transferred_size)})'
if Config.INDEX_URL is not None:
url = requests.utils.requote_uri(f'{Config.INDEX_URL}/{meta.get("name")}/')
msg += f' | <a href="{url}"> Index URL </a>'
else:
file = self.copyFile(meta.get('id'), Config.parent_id)
msg += f' <a href="{self.__G_DRIVE_BASE_DOWNLOAD_URL.format(file.get("id"))}">{file.get("name")}</a>'
try:
msg += f' ({self.get_readable_file_size(int(meta.get("size")))}) '
except TypeError:
pass
if Config.INDEX_URL is not None:
url = requests.utils.requote_uri(f'{Config.INDEX_URL}/{file.get("name")}')
msg += f' | <a href="{url}"> Index URL </a>'
except Exception as err:
if isinstance(err, RetryError):
LOGGER.info(f"Total Attempts: {err.last_attempt.attempt_number}")
err = err.last_attempt.exception()
err = str(err).replace('>', '').replace('<', '')
LOGGER.error(err)
return err
return msg
def cloneFolder(self, name, local_path, folder_id, parent_id):
0
Source : gdriveTools.py
with GNU General Public License v3.0
from Jigarvarma2005
def drive_list(self, fileName):
msg = ""
fileName = self.escapes(str(fileName))
# Create Search Query for API request.
query = f"'{Config.parent_id}' in parents and (name contains '{fileName}')"
response = self.__service.files().list(supportsTeamDrives=True,
includeTeamDriveItems=True,
q=query,
spaces='drive',
pageSize=20,
fields='files(id, name, mimeType, size)',
orderBy='modifiedTime desc').execute()
for file in response.get('files', []):
if file.get(
'mimeType') == "application/vnd.google-apps.folder": # Detect Whether Current Entity is a Folder or File.
msg += f"⁍ < a href='https://drive.google.com/drive/folders/{file.get('id')}'>{file.get('name')}" \
f" < /a> (folder)"
if Config.INDEX_URL is not None:
url = requests.utils.requote_uri(f'{Config.INDEX_URL}/{file.get("name")}/')
msg += f' | <a href="{url}"> Index URL </a>'
else:
msg += f"⁍ < a href='https://drive.google.com/uc?id={file.get('id')}" \
f"&export=download'>{file.get('name')} < /a> ({self.get_readable_file_size(int(file.get('size')))})"
if Config.INDEX_URL is not None:
url = requests.utils.requote_uri(f'{Config.INDEX_URL}/{file.get("name")}')
msg += f' | <a href="{url}"> Index URL </a>'
msg += '\n'
return msg
def get_readable_file_size(self,size_in_bytes) -> str: