The other week I ran into the problem of scraping dynamically generated content, so I used the selenium library in combination with requests and bs4. Since I only just learned how to use these tools, I am unsure about the quality of the implementation. I would like general feedback on the way I used the libraries, the quality of my code, and the logic behind it.
Link to GitHub README.
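The general pattern I follow: Selenium drives a headless Chrome to render the JavaScript-generated listing page, and once I have a concrete job URL I hand it off to plain requests + BeautifulSoup, since the detail pages are static. A minimal sketch of that handoff (the URL and the selectors below are placeholders, not the real site's markup):

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import requests
from bs4 import BeautifulSoup

opts = Options()
opts.add_argument('--headless')
driver = webdriver.Chrome(options=opts)  # assumes chromedriver is on PATH

driver.get('https://example.com/jobs')  # placeholder URL for a JS-rendered page
url = driver.find_element_by_tag_name('a').get_attribute('href')
driver.quit()

# The detail page is static, so a plain GET + parse is enough
soup = BeautifulSoup(requests.get(url).content, 'html.parser')
title = soup.find('h1')
print(title.text if title else 'no <h1> found')

And here is the full script: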
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
import selenium.common.exceptions
import requests
from bs4 import BeautifulSoup
from time import sleep


def scraper():
    opts = Options()
    opts.add_argument('--headless')
    driver = webdriver.Chrome(r'C:\Users\leagu\chromedriver.exe', options=opts)
    pos = input('Enter your desired position: ')
    URL = 'https://remote.co/remote-jobs/search/?search_keywords=' + pos.replace(' ', '+')
    driver.get(URL)
    # Scroll to the bottom of the page by repeatedly clicking "Load more listings"
    while True:
        try:
            WebDriverWait(driver, 5).until(
                ec.text_to_be_present_in_element(
                    (By.CLASS_NAME, 'load_more_jobs'),
                    'Load more listings')
            )
            loadMore = driver.find_element_by_class_name('load_more_jobs')
            loadMore.click()
        except:
            try:  # can't locate the element - click the close button on the pop-up ad
                WebDriverWait(driver, 5).until(
                    ec.presence_of_element_located((By.CLASS_NAME, 'portstlucie-CloseButton'))
                )
                addClose = driver.find_element_by_xpath('//*[@id="om-oqulaezshgjig4mgnmcn-optin"]/div/button')
                addClose.click()
            except:  # timeout / can't locate the ad - break
                break
    # Find all the job listings
    listings = driver.find_elements_by_class_name('job_listing')
    if len(listings) == 0:
        print(f'There are 0 jobs found for the {pos} criteria. Please use different wording.')
        sleep(5)
        scraper()
    else:
        scrapeN = input(f"There are {len(listings)} jobs for the {pos} position. If you wish to view a portion of them, enter the number of jobs to view; otherwise write 'max': ")
        if scrapeN.lower() == 'max':
            scrapeN = len(listings)
scrapeN = input(f"There are {len(listings)} number of jobs for the {pos} position. If u wish to view a portion of them enter the number of the jobs to view else write 'max': " )
        print('\n')
        for i in range(int(scrapeN)):  # Iterate through the requested number of job listings
            URL = listings[i].find_element_by_tag_name('a').get_attribute('href')
            html = requests.get(URL)
            soup = BeautifulSoup(html.content, 'html.parser')
            jobT = soup.find('h1', class_='font-weight-bold').text
            jobPD = soup.find('time').text
            link = soup.find('a', class_='application_button')['href']
            print(f'Job - {jobT}. This job was {jobPD}.\nMore information about the job at {URL}.\nLink for application - {link}', end='\n\n')


if __name__ == '__main__':
    scraper()
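The part I am least confident about is the two bare except clauses. An alternative I considered, but have not tested, is catching the specific Selenium exceptions and keeping everything else the same; I am assuming TimeoutException, NoSuchElementException, and ElementClickInterceptedException are the ones that can actually be raised here:

from selenium.common.exceptions import (ElementClickInterceptedException,
                                        NoSuchElementException, TimeoutException)

while True:
    try:
        WebDriverWait(driver, 5).until(
            ec.text_to_be_present_in_element(
                (By.CLASS_NAME, 'load_more_jobs'),
                'Load more listings')
        )
        driver.find_element_by_class_name('load_more_jobs').click()
    except (TimeoutException, NoSuchElementException, ElementClickInterceptedException):
        try:
            # the pop-up ad can cover the button, so dismiss it first
            WebDriverWait(driver, 5).until(
                ec.presence_of_element_located((By.CLASS_NAME, 'portstlucie-CloseButton'))
            )
            driver.find_element_by_xpath('//*[@id="om-oqulaezshgjig4mgnmcn-optin"]/div/button').click()
        except (TimeoutException, NoSuchElementException):
            break  # neither the button nor the ad is present - reached the bottom

Does this look like a reasonable direction, or is there a cleaner way to structure the retry logic?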