xxxxxxxxxx
import requests
from bs4 import BeautifulSoup as bs

# Prompt for a GitHub username, fetch the profile page, and print the
# avatar image URL found in the <img alt="Avatar"> tag.
github_avatar = input('Input git user: ')
url = 'https://github.com/' + github_avatar
# timeout so a hung connection cannot block forever; raise_for_status so an
# unknown user (HTTP 404) fails loudly instead of parsing an error page.
r = requests.get(url, timeout=10)
r.raise_for_status()
soup = bs(r.text, 'html.parser')
# find() returns None when no tag matches — guard before subscripting,
# otherwise this is a TypeError rather than a useful message.
avatar_tag = soup.find('img', {'alt': 'Avatar'})
if avatar_tag is None:
    raise SystemExit('No avatar image found on the profile page')
print(avatar_tag['src'])
xxxxxxxxxx
from bs4 import BeautifulSoup
import requests

# Fetch a page and collect every anchor (<a>) tag it contains.
# NOTE(review): 'url' is a placeholder literal — substitute a real URL.
response = requests.get('url', timeout=10)
# A requests.Response has no find_all(); the HTML must be parsed with
# BeautifulSoup first, then queried on the soup object.
soup = BeautifulSoup(response.text, 'html.parser')
all_links = soup.find_all('a')  # list of <a> Tag objects (link + text)
xxxxxxxxxx
from bs4 import BeautifulSoup
import requests

# Fetch a page, collect every anchor (<a>) tag, and print each link's
# visible text and its href target.
# NOTE(review): 'url' is a placeholder literal — substitute a real URL.
response = requests.get('url', timeout=10)
# A requests.Response has no find_all(); parse the HTML with BeautifulSoup
# first and query the soup object instead.
soup = BeautifulSoup(response.text, 'html.parser')
all_links = soup.find_all('a')
for link in all_links:
    print(link.get_text())    # the anchor's visible text
    print(link.get('href'))   # the anchor's target URL (None if absent)
xxxxxxxxxx
import requests
from bs4 import BeautifulSoup


def get_links_from_website(url):
    """Return a list of href values for every <a> tag on the page at *url*.

    Anchors without an href attribute contribute None to the list.
    Raises requests.HTTPError for non-2xx responses.
    """
    # Send a GET request; timeout prevents an indefinite hang, and
    # raise_for_status surfaces HTTP errors instead of parsing error pages.
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    # Parse the HTML content using BeautifulSoup
    soup = BeautifulSoup(response.content, 'html.parser')
    # Find all the anchor tags in the HTML
    link_tags = soup.find_all('a')
    # Extract the href attribute from each anchor tag to get the link
    links = [link.get('href') for link in link_tags]
    return links


# Example usage
website_url = 'https://example.com'
links = get_links_from_website(website_url)
# Print all the links
for link in links:
    print(link)