from bs4 import BeautifulSoup
import requests
import time
from urllib.parse import urlparse, parse_qs
from amazon_paapi import AmazonApi, get_asin
from telegram import Bot
# Bitly access tokens; the shortener step below tries each of these in turn
# when a token is expired or rate-limited
tokens = ['ece3211ef8e0badaad56132a0d602577dd662095',
'fa1ed2c33deb562dc0214ed71401ce609791771d',
'592e879e9a64632f497152524148140823a6b988',
'b60ee22a220e9fddd11a805b4f1f8da66424a18c',
'08b18e86f667e6435b76fa326f4f7fa48d23040c',
'd505d10237e0836add37a837e74df8fd0798a357',
'7c30333de88f435b41fb7011f412d2b576887a98',
'f7248cd45c31dc0ef74dd11699e27b2a419920c9',
'5e6998fa64dc8716337081259918b22eb1abe1f9',
'9ca2ea44bf972a7d5d8743a047cc3f0198fb5917',
'0d72c6cb3d7a7847c5b908680be57b79095abc68',
'02fb92a85e79c4893c206197d5528e0a3aae4bb4',
'b20752d2891f7e70e44daae2cbb74999ed0bda5b',
'2a532a27ebb136366091caf8b4aff8be0557ecb6',
'8c10a074d27c259d909081a4b34c7af4855e901a',
'ddc60d214dec3c3de7ae2c60a864267d529976eb',
'66682a3335f82bd55444a9177444d2b84619ae57',
'bc75b2d0d74ccc3d51bfed56409e2eead0fa6784',
'34cbdcfab4c363c231b1662619712c3f6b6d0b72',
'5306b31d8c3d61d2f4c418446cd5d3fc3f0b02c7',
'eb0228397fbf426a705bd9039ff55c353d7c737c',
'0a5ed06396dc5431ea30ebd5372cd563070e4c02',
'fa3e9312e19cac310ef5bfa5ac80be24d4051cf4',
'a75c0bfa6e0114319ce3fcd86e15a41f05663a10',
'55f1d6f3c118d803d84d0f2d4baad3b2d62f6b3e',
'40c420dc48028d4eb8d40f25dac12d200938ab51',
'835b827283a17f2e4aa1522aa04e692b47481543',
'16d569f4a1212fb883b5f5e835d8a9f532ac358c',
'0c8c33280ee3b8b13d373750ece8e7c94333bdf2',
'816e22c48b458235e0d058abde1704048efc2098',
'ad16d35bd16c113ed24e67a5b17c4153db4aff22',
'27249d60576bb2c4a2b8c998eaa5254ac0f1d542',
'9288d89f5662e7d40d8901f34201fd59c58626db',
'359a664ba4f990694a43d5b5005074ecc9bf68aa',
'851da9080df05a35bcde9dfefd6d6790b2998174',
'8117a98c336af35cc32dd20b24d6942678c6b90b',
'19e2030343c4ff847a8c9226a03ae48cd26e5c61',
'd59dc77264e71356cf9b807f5d4d4044d89bae9b',
'8323c7e48c4aa81578fdcae0d108ee0f73aa5044',
'7a91647d8c1317cb584fa7a534dacee0fe6cdb38',
'8b1b8d24071b100e692d1d0cd79543394c83b123',
'63f1ede227bd985c8900a3b32249617aa4b93532',
'888926be8ca84a849e2b6de6ff1f5b1709fae4f5',
'd115780a49f29c5f47e3bfd8d37ce35b8ae5f832',
'76d4e4216792426081af0dda5d8c4b56624d1156',
'8172d5ed7af66dcb3c4472741b393da0b85817bb',
'f389aa3a2d7f447789e12e3ae1b7edd9023ec8db',
'4213ce9d405a558652f7fc9727467c25b3498e17',
'28a15f4a70cbad793b5b0c43287b8905bceebcad',
'fa787ab0ef3bc8b2d3310f4f1e0862bbaa98663a',]
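# Browser-like request headers, presumably so DesiDime serves the normal page
# to the scraper instead of blocking the default python-requests user agent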
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
    'Accept-Language': 'en-US, en;q=0.5',
}
# Replace with your own Telegram bot token
bot_token = '6240833652:AAFUVI7UmZfYHK_UR3_tWYeS8hjsqYvEQ-U'
# Replace with your own chat ID
chat_id = "@dealsbyjyoti"
bot = Bot(token=bot_token)
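# Main loop: scrape the newest DesiDime deal, resolve the store link, re-tag
# Amazon links with the affiliate tag, shorten the result with Bitly, post it
# to the Telegram channel, then sleep for five minutes and repeat.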
url = "https://www.desidime.com/new?deals_view=deal_text_view" #Text view of deals
source = requests.get (url, headers=HEADERS)
pnf = "Page Not Found"
soup = BeautifulSoup(source.text,'html.parser')
div = soup.find_all('div', class_="deal-text-title")[0].a['href']#Finding first post desidime link
# single page code here...
single = "https://desidime.com"+div+""
print(single)
source2 = requests.get(single, headers=HEADERS)
soup2 = BeautifulSoup(source2.text,'html.parser')
store = soup2.find('div', class_="fl tr").img['alt']
title = soup2.find('h1').text
dealoff = soup2.find('div', class_="dealprice").text
preoff = soup2.find('div', class_="dealpercent").span.text
mrp = soup2.find('div', class_="dealpercent")
mrp2 = mrp.find_all('span')[1].text
print(dealoff ,preoff, mrp2)
desidime_buynowlink = soup2.find('a', class_="buy_now_tag")['href']
correctlink = desidime_buynowlink.split("https://")[2]
if not correctlink.startswith("https://"):
correctlink = "https://" + correctlink
print(correctlink)
    # Amazon Product Advertising API credentials and affiliate tag
    KEY = "AKIAJCTGVQNTIW2GOKKQ"
    SECRET = "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"
    TAG = "geeky.vikas-21"
    COUNTRY = "IN"
    MARKETPLACE = "www.amazon.in"
    PARTNERTYPE = "Associates"
    amazon = AmazonApi(KEY, SECRET, TAG, COUNTRY)
    try:
        # Extract the selling price (whole-rupee part) from the apex_desktop block
        def get_price(main_div):
            try:
                price_span = main_div.find('span', {'class': 'a-price-whole'})
                return price_span.text.replace("₹", "").replace(",", "")
            except:
                return None

        # Extract the MRP (list price) from the apex_desktop block
        def get_mrp(main_div):
            try:
                mrp_span = main_div.find('span', {'class': 'basisPrice'}).find('span', class_="a-offscreen")
                return mrp_span.text.replace("₹", "").replace(",", "")
            except:
                return "------"

        # Extract the coupon text from the promo block, stripping the
        # "Apply ... coupon Terms" wrapper so only the amount remains
        def get_coupon(main_div):
            try:
                coupon_label = main_div.find('div', id="promoPriceBlockMessage_feature_div").find_all('label')[1]
                return coupon_label.text.replace('Apply ', '').replace(' coupon Terms', '').replace(' ', '')
            except:
                return "\n---------------\n"
        # Calculate the effective price after applying a flat (₹) or percentage coupon
        def calculate_discounted_price(price, coupon):
            price = int(price.replace(",", ""))
            if "₹" in coupon:
                price -= int(coupon.replace("₹", ""))
            elif "%" in coupon:
                percentage = float(coupon.strip("%")) / 100
                price -= price * percentage
            else:
                print("Invalid coupon")
            return str(price)
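        # Worked example with hypothetical values:
        #   calculate_discounted_price("1499", "₹100") -> "1399"
        #   calculate_discounted_price("1499", "10%")  -> "1349.1"
        #   any other coupon string prints "Invalid coupon" and returns the
        #   price unchanged ("1499").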
        # Scrape price, MRP and coupon details from the Amazon product page
        def get_amazon_page_info(correct_link):
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0;Win64) AppleWebkit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
            }
            response = requests.get(correct_link, headers=headers)
            soup = BeautifulSoup(response.text, 'html.parser')
            main_div = soup.find('div', id="apex_desktop")
            price = get_price(main_div)
            mrp = get_mrp(main_div)
            coupon = get_coupon(main_div)
            final_price = calculate_discounted_price(price, coupon) if price is not None else None
            return price, mrp, coupon, final_price

        # Rewrite the product URL to the /dp/ form and append the affiliate tag
        def get_custom_url(correct_link, tag):
            url = correct_link.replace("gp/product", "dp")
            parsed_url = urlparse(url)
            query = parse_qs(parsed_url.query)
            query['tag'] = [tag]
            new_query = '&'.join([f"{k}={v[0]}" for k, v in query.items()])
            new_url = parsed_url._replace(query=new_query).geturl()
            return new_url
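        # Example with a hypothetical ASIN:
        #   get_custom_url("https://www.amazon.in/gp/product/B0TEST12345?ref_=x", "jiten634-21")
        #   -> "https://www.amazon.in/dp/B0TEST12345?ref_=x&tag=jiten634-21"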
        # Same scrape as get_amazon_page_info, run against the tagged (affiliate) URL
        def get_custom_page_info(new_url):
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0;Win64) AppleWebkit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
            }
            response = requests.get(new_url, headers=headers)
            soup = BeautifulSoup(response.text, 'html.parser')
            main_div = soup.find('div', id="apex_desktop")
            price = get_price(main_div)
            mrp = get_mrp(main_div)
            coupon = get_coupon(main_div)
            final_price = calculate_discounted_price(price, coupon) if price is not None else None
            return price, mrp, coupon, final_price
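        # The tagged URL is scraped a second time so its final price can be
        # compared against the untagged page before the deal is posted.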
        # Only Amazon links are processed; anything else is skipped
        if 'amazon' in correctlink:
            # Get the ASIN and item information from the Amazon link
            asintemp = get_asin(correctlink.replace('gp/product', 'dp'))
            asin = asintemp.replace("?", "")
            item = amazon.get_items(asin)[0]
            amzn_name = item.item_info.title.display_value
            amzn_img = item.images.primary.large.url
            # Extract information from the Amazon page
            amzn_price, amzn_mrp, amzn_coupon, amzn_final_price = get_amazon_page_info(correctlink)
            print("Price:", amzn_price)
            print("MRP:", amzn_mrp)
            print(amzn_coupon, "Coupon, After Discount", amzn_final_price)
            # Generate the affiliate URL and scrape the same details from it
            custom_url = get_custom_url(correctlink, "jiten634-21")
            print("Custom URL: ", custom_url)
            custom_price, custom_mrp, custom_coupon, custom_final_price = get_custom_page_info(custom_url)
            print("Custom Price:", custom_price)
            print("MRP:", custom_mrp)
            print(custom_coupon, "Coupon, After Discount", custom_final_price)
            # Post only when both pages report the same final price; if they differ,
            # last_price stays undefined and the bare except below skips this deal
            if amzn_final_price == custom_final_price:
                last_price = custom_final_price
                print("Last", last_price)
            # Bitly section -------------------------------------------------
            url_to_shorten = custom_url
            valid_token = False
            while not valid_token:
                for token in tokens:
                    headers = {'Authorization': 'Bearer ' + token}
                    payload = {'long_url': url_to_shorten}
                    try:
                        response = requests.post('https://api-ssl.bitly.com/v4/shorten', headers=headers, json=payload)
                        response.raise_for_status()
                        shortened_url = response.json()['link']
                        print("Link", shortened_url)
                        valid_token = True
                        break
                    except requests.exceptions.HTTPError as err:
                        if response.status_code == 401:
                            print(f'Token {token} is expired or limit exceeded')
                        elif response.status_code == 429:
                            print(f'Too many requests for token {token}')
                            continue
                        else:
                            raise err
            shorturl = shortened_url
            # Build the caption and post the deal image to the Telegram channel
            mycap = amzn_name + "\n\n❌ MRP: ₹ " + custom_mrp + "\n✅ Deal Price: ₹ " + last_price + "\n" + custom_coupon + "\nBuy Link:-" + shortened_url
            bot.send_photo(chat_id=chat_id, photo=amzn_img, caption=mycap)
    except:
        print("Maybe some error")
    # Wait five minutes before checking for the next deal
    time.sleep(300)