0

In my code below I try to import all the odds from 25 different URL pages. I currently use a simple loop, but it takes too much time. How can I parallelize this code to reduce the execution time?

Here is the code :

#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Scrape soccer-match odds from coteur.com: collect the per-match links
from the overview page, then visit each match page and print its odds table."""

from selenium import webdriver
import statistics as stat
import numpy as np

driver = webdriver.Firefox()
url = 'https://www.coteur.com/cotes-foot.php'
driver.get(url)

# Store url associated with the soccer games
url_links = [
    a.get_attribute('href')
    for a in driver.find_elements_by_xpath('//a[contains(@href, "match/cotes-")]')
]

driver.close()
print(len(url_links), '\n')

for link in url_links:
    driver = webdriver.Firefox()
    try:
        driver.get(link)

        # Store odds into table: one button per bookmaker/outcome cell,
        # one 20%-wide <th> per header column.
        odds = [b.text for b in driver.find_elements_by_xpath(
            '//button[contains(@class, "btn btn-default btn-xs btncote")]')]
        header = [h.text for h in driver.find_elements_by_xpath(
            '//th[contains(@width, "20%")]')]

        # Three outcomes (1/N/2) per bookmaker -> reshape flat list to (rows, 3).
        odds = np.array([float(o) for o in odds]).reshape(len(odds) // 3, 3)
        header = np.array(header)

        print(odds, '\n')
    finally:
        # BUG in original: `driver;close()` was a no-op expression followed by a
        # call to an undefined name, so no browser was ever closed and one
        # Firefox instance leaked per URL. quit() also ends the driver process.
        driver.quit()

1 Answer 1

1

ThreadPoolExecutor would do the job perfectly, if you're using Python > 3.5.

from selenium import webdriver
import statistics as stat
import numpy as np
from concurrent.futures import ThreadPoolExecutor

# Open the odds overview page once to harvest the per-match links.
driver = webdriver.Firefox()
url = 'https://www.coteur.com/cotes-foot.php'
driver.get(url)

# Store url associated with the soccer games
url_links = [
    anchor.get_attribute('href')
    for anchor in driver.find_elements_by_xpath('//a[contains(@href, "match/cotes-")]')
]

driver.close()
print(len(url_links), '\n')

def sraper(url_link):
    """Fetch one match page, parse its odds table, and print it.

    Runs in a worker thread; each call owns (and now reliably releases)
    its own WebDriver instance.

    Args:
        url_link: URL of a single match page on coteur.com.
    """
    driver = webdriver.Firefox()
    try:
        driver.get(url_link)

        # Store odds into table: one button per bookmaker/outcome cell,
        # one 20%-wide <th> per header column.
        odds = [b.text for b in driver.find_elements_by_xpath(
            '//button[contains(@class, "btn btn-default btn-xs btncote")]')]
        header = [h.text for h in driver.find_elements_by_xpath(
            '//th[contains(@width, "20%")]')]

        # Three outcomes (1/N/2) per bookmaker -> reshape flat list to (rows, 3).
        odds = np.array([float(o) for o in odds]).reshape(len(odds) // 3, 3)
        header = np.array(header)

        print(odds, '\n')
    finally:
        # BUG in original: `driver;close()` raised NameError — silently
        # swallowed because the executor.map result iterator is never
        # consumed — so every worker leaked a Firefox instance. quit()
        # also terminates the geckodriver process.
        driver.quit()
    
# Fan the scraping out over 8 worker threads (one browser per task).
# The `with` block waits for all submitted tasks before returning.
# NOTE(review): the iterator returned by executor.map is never consumed,
# so any exception raised inside sraper is silently discarded.
with ThreadPoolExecutor(max_workers=8) as executor:
    executor.map(sraper, url_links)
Sign up to request clarification or add additional context in comments.

Comments

Your Answer

By clicking “Post Your Answer”, you agree to our terms of service and acknowledge you have read our privacy policy.

Start asking to get answers

Find the answer to your question by asking.

Ask question

Explore related questions

See similar questions with these tags.