python – Create functions for web scraping

I built a web scraper to get a list of jobs on Facebook and other websites, but I want to divide the code into functions that I can reuse for other websites. The structure below works, but I think it could be more efficient with functions, and I'm stuck on how to structure them. I'm only pulling two pages for the test. (I've sketched the function layout I have in mind after the code.)

from time import time, sleep
from random import randint
from warnings import warn
from requests import get
from IPython.core.display import clear_output
from bs4 import BeautifulSoup
import csv

# Range of only 2 pages (for testing)
pages = [str(i) for i in range(1, 3)]
cities = ["Menlo%20Park%2C%20CA",
          "Fremont%2C%20CA",
          "Los%20Angeles%2C%20CA",
          "Mountain%20View%2C%20CA",
          "Northridge%2CCA",
          "Redmond%2C%20WA",
          "San%20Francisco%2C%20CA",
          "Santa%20Clara%2C%20CA",
          "Seattle%2C%20WA",
          "Woodland%20Hills%2C%20CA"]

# Prepare to monitor the loop.
start_time = time()
requests = 0

with open('facebook_job_list.csv', 'w', newline='') as f:
    header = csv.writer(f)
    header.writerow(["Website", "Title", "Location", "Job URL"])

for page in pages:
    for c in cities:
        # Request the HTML page
        response = get("https://www.facebook.com/careers/jobs/?page=" + page +
                       "&results_per_page=100&locations[0]=" + c)

        # Pause the loop for between 8 and 15 seconds.
        sleep(randint(8, 15))

        # Monitor the frequency of requests.
        requests += 1
        elapsed_time = time() - start_time
        print("Request: {}; Frequency: {} requests/s".format(requests, requests / elapsed_time))
        clear_output(wait=True)

        # Raise a warning for status codes other than 200.
        if response.status_code != 200:
            warn("Request: {}; Status Code: {}".format(requests, response.status_code))

        # Break the loop if the number of requests is greater than expected.
        if requests > 2:
            warn("The number of requests was greater than expected.")
            break

        # Parse the content of the response with BeautifulSoup.
        page_soup = BeautifulSoup(response.text, 'html.parser')
        job_containers = page_soup.find_all("a", "_69jm")

        # Select all of the 100 job containers from a single page.
        for container in job_containers:
            site = page_soup.find("title").text
            title = container.find("div", "_69jo").text
            location = container.find("div", "_1n-z _6hy- _21-h").text
            link = container.get("href")
            job_link = "https://www.facebook.com" + link

            with open('facebook_job_list.csv', 'a', newline='') as f:
                rows = csv.writer(f)
                rows.writerow([site, title, location, job_link])
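
This is the function layout I have in mind (just a sketch: the names build_url, fetch_page, parse_jobs, and scrape are placeholders I made up, it reuses the cities list defined above, and the tag/class names are the Facebook-specific ones from my code, which I'd pass in as parameters for other sites):

from time import sleep
from random import randint
from warnings import warn
import csv

from bs4 import BeautifulSoup
from requests import get


def build_url(page, city):
    # Facebook-specific URL; another site would get its own builder.
    return ("https://www.facebook.com/careers/jobs/?page=" + page +
            "&results_per_page=100&locations[0]=" + city)


def fetch_page(url, min_delay=8, max_delay=15):
    # Politeness delay before each request; warn on non-200 responses.
    sleep(randint(min_delay, max_delay))
    response = get(url)
    if response.status_code != 200:
        warn("Status Code: {} for {}".format(response.status_code, url))
    return response


def parse_jobs(html, container_tag="a", container_class="_69jm",
               title_class="_69jo", location_class="_1n-z _6hy- _21-h",
               base_url="https://www.facebook.com"):
    # The tag/class defaults are Facebook-specific; pass different ones per site.
    soup = BeautifulSoup(html, "html.parser")
    site = soup.find("title").text
    for container in soup.find_all(container_tag, container_class):
        title = container.find("div", title_class).text
        location = container.find("div", location_class).text
        job_link = base_url + container.get("href")
        yield [site, title, location, job_link]


def scrape(pages, cities, csv_path="facebook_job_list.csv"):
    # Open the CSV once and stream every parsed row into it.
    with open(csv_path, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["Website", "Title", "Location", "Job URL"])
        for page in pages:
            for city in cities:
                response = fetch_page(build_url(page, city))
                for row in parse_jobs(response.text):
                    writer.writerow(row)


scrape([str(i) for i in range(1, 3)], cities)

Is this a reasonable way to split it? My thinking is that parse_jobs takes the site-specific selectors as arguments so the same loop can drive other job boards, and opening the CSV once in scrape avoids reopening the file for every row.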