The previous timeout values were too short for slower network conditions, causing premature timeouts during job scraping. Increased wait_for_function timeout from 30s to 80s and load_state timeout from 30s to 60s to accommodate slower page loads.
32 lines
933 B
Python
32 lines
933 B
Python
|
|
# Project imports: the fingerprinted browser engine and the LinkedIn scraper
# built on top of it.
from scraping_engine import FingerprintScrapingEngine
from job_scraper2 import LinkedInJobScraper
import os
from dotenv import load_dotenv
import asyncio

# Load environment variables from a local .env file so that
# SCRAPING_USERNAME / SCRAPING_PASSWORD are available via os.getenv() below.
load_dotenv()
|
|
|
|
|
|
async def main():
    """Run one LinkedIn job-scraping session.

    Builds a deterministic fingerprinted scraping engine, wires it into the
    LinkedIn job scraper with the fields to extract, and launches the scrape
    using credentials read from the environment (populated by load_dotenv()
    at module import).

    Raises:
        RuntimeError: if SCRAPING_USERNAME or SCRAPING_PASSWORD is unset,
            so a misconfigured .env fails fast instead of passing None
            credentials into the login flow.
    """
    engine = FingerprintScrapingEngine(
        seed="job_scraping_123",         # fixed seed -> reproducible fingerprint
        target_os="windows",
        db_path="job_listings.db",       # structured results (SQLite)
        markdown_path="job_listings.md"  # human-readable results
    )

    # Initialize scraper with the fields to extract from each listing.
    scraper = LinkedInJobScraper(
        engine,
        human_speed=1.6,  # pacing multiplier for human-like interaction delays
        user_request="Extract title, company, location, description, requirements, qualifications, nature of job(remote, onsite, hybrid) and salary",
    )

    # Fail fast on missing credentials rather than sending None to the login.
    email = os.getenv("SCRAPING_USERNAME")
    password = os.getenv("SCRAPING_PASSWORD")
    if not email or not password:
        raise RuntimeError(
            "SCRAPING_USERNAME and SCRAPING_PASSWORD must be set (e.g. in .env)"
        )

    await scraper.scrape_jobs(
        search_keywords="Web Designer location:New York",
        credentials={
            "email": email,
            "password": password,
        },
    )
|
|
|
|
# Script entry point: run the async scrape to completion on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())