forked from rannes.dev/sw-jobs-scraper
Compare commits
31 Commits
f9530fd7d2
fd73c4d5d9
bbb1c3f234
8a5e665f77
7060e66713
6b12914c1f
44b86ea7ec
f1efdb879c
6420086124
fc17076d3f
d6aab7675e
7c6955c2cc
777c9052ed
b299e19571
709464b5f7
9b0941a04a
ec140cbbc7
f7fcb41a87
cd675c0d6a
333739450f
8c9f6e2dee
32f83e358b
979ed97738
8abff30b52
fd9b4b515c
1d25f4e112
07bb549d44
c0ec6dc003
994ee9c732
693d654764
38023c1aa5
31 .gitea/workflows/build.yaml (Normal file)
@@ -0,0 +1,31 @@
+name: Build and Push Docker Image
+
+on:
+  push:
+    branches: [ main ]
+  pull_request:
+    branches: [ main ]
+
+jobs:
+  build-and-push:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Log in to Gitea Container Registry
+        uses: docker/login-action@v3
+        with:
+          registry: ${{ secrets.REGISTRY }}
+          username: ${{ secrets.USER }}
+          password: ${{ secrets.TOKEN }}
+
+      - name: Build and push Docker image
+        uses: docker/build-push-action@v5
+        with:
+          context: .
+          push: true
+          tags: ${{ secrets.REGISTRY }}/rannes.dev/sw-jobs-scraper:latest
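The workflow above only uses stock actions (checkout, docker/login-action, docker/build-push-action) driven by three repository secrets. As a rough local equivalent of the login/build/push steps, useful for checking the registry credentials before wiring them into CI, something like the following sketch could be run; the REGISTRY, USER and TOKEN values here are placeholders, not values from this repository:

```bash
# Hypothetical local dry run of the workflow's three Docker steps.
REGISTRY="gitea.example.com"   # placeholder: the real host lives in the REGISTRY secret
USER="ci-user"                 # placeholder: USER secret
TOKEN="changeme"               # placeholder: TOKEN secret

docker login "$REGISTRY" -u "$USER" -p "$TOKEN"
docker build -t "$REGISTRY/rannes.dev/sw-jobs-scraper:latest" .
docker push "$REGISTRY/rannes.dev/sw-jobs-scraper:latest"
```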
5 .gitignore (vendored)
@@ -1 +1,4 @@
-/lambda-package
+/thehub_cache
+/thehub.json
+/itjobbank_cache
+/it-jobbank.json
11 Dockerfile (Normal file)
@@ -0,0 +1,11 @@
+FROM golang:1.23 AS builder
+WORKDIR /app
+COPY go.mod go.sum ./
+RUN go mod download
+COPY *.go ./
+RUN CGO_ENABLED=0 GOOS=linux go build -o job-scraper
+
+FROM alpine:3.18
+WORKDIR /app
+COPY --from=builder /app/job-scraper .
+CMD ["./job-scraper"]
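The scrapers in main.go (further down in this diff) write their JSON output and Colly caches under /app/data, a path the Dockerfile neither creates nor declares as a volume. A minimal sketch for building and running the image locally, assuming a host directory is mounted at that path and using job-scraper as an arbitrary local tag:

```bash
# Build the two-stage image and run it with a writable directory at /app/data,
# where the scrapers write thehub.json, it-jobbank.json and their cache dirs.
docker build -t job-scraper .
mkdir -p ./data
docker run --rm -v "$(pwd)/data:/app/data" job-scraper
```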
31 build.sh (deleted)
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-# Set variables
-PACKAGE_DIR="./lambda-package"
-BUILD_FILE="bootstrap"
-ZIP_FILE="lambda-deployment.zip"
-SOURCE_FILE="main.go"
-
-# Delete the content of the lambda-package directory
-rm -rf $PACKAGE_DIR/*
-echo "Deleted the content of $PACKAGE_DIR"
-
-# Set environment variables and build the Go project
-GOOS=linux GOARCH=arm64 go build -o $BUILD_FILE -tags lambda.norpc $SOURCE_FILE
-echo "Built the Go project with GOOS=linux and GOARCH=arm64"
-
-# Move the build file to the lambda-package directory
-mv $BUILD_FILE $PACKAGE_DIR/
-echo "Moved the build file to $PACKAGE_DIR"
-
-# Change directory to lambda-package
-cd $PACKAGE_DIR
-
-# Zip the contents of lambda-package into lambda-deployment.zip
-zip -r $ZIP_FILE *
-echo "Zipped the contents of $PACKAGE_DIR into $ZIP_FILE"
-
-# Return to the original directory
-cd -
-
-echo "Script completed successfully"
1 go.mod
@@ -8,7 +8,6 @@ require (
     github.com/antchfx/htmlquery v1.3.1 // indirect
     github.com/antchfx/xmlquery v1.4.0 // indirect
     github.com/antchfx/xpath v1.3.0 // indirect
-    github.com/aws/aws-lambda-go v1.47.0 // indirect
     github.com/gobwas/glob v0.2.3 // indirect
     github.com/gocolly/colly v1.2.0 // indirect
     github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
2 go.sum
@@ -8,8 +8,6 @@ github.com/antchfx/xmlquery v1.4.0 h1:xg2HkfcRK2TeTbdb0m1jxCYnvsPaGY/oeZWTGqX/0h
 github.com/antchfx/xmlquery v1.4.0/go.mod h1:Ax2aeaeDjfIw3CwXKDQ0GkwZ6QlxoChlIBP+mGnDFjI=
 github.com/antchfx/xpath v1.3.0 h1:nTMlzGAK3IJ0bPpME2urTuFL76o4A96iYvoKFHRXJgc=
 github.com/antchfx/xpath v1.3.0/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
-github.com/aws/aws-lambda-go v1.47.0 h1:0H8s0vumYx/YKs4sE7YM0ktwL2eWse+kfopsRI1sXVI=
-github.com/aws/aws-lambda-go v1.47.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
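The go.mod and go.sum hunks drop the now-unused aws-lambda-go requirement and its checksums. Once the lambda import is removed from main.go, this cleanup can normally be reproduced with the module tooling rather than by editing the files by hand, for example:

```bash
# Prune unused requirements and their go.sum entries, then verify the build.
go mod tidy
go build ./...
```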
268 main.go
@@ -1,12 +1,13 @@
 package main
 
 import (
-    "context"
+    "encoding/json"
     "fmt"
+    "log"
+    "os"
     "strings"
     "time"
 
-    "github.com/aws/aws-lambda-go/lambda"
     "github.com/gocolly/colly"
 )
 
@@ -19,6 +20,8 @@ type job struct {
     Description string `json:"description"`
     Link string `json:"link"`
     Skills skills `json:"skills"`
+    Scraped string `json:"scraped"`
+    Source string `json:"source"`
 }
 
 type skills struct {
@@ -28,53 +31,85 @@ type skills struct {
     Svelte bool `json:"svelte"`
     Nextjs bool `json:"nextjs"`
     Typescript bool `json:"typescript"`
+    Tailwind bool `json:"tailwind"`
 }
 
-var (
-    jobs []job
-    lastFetch time.Time
-    cacheTTL = time.Minute * 5
-    jobLimit = 20
-)
+// Utility functions
 
+// Checks if a string contains any of the given keywords
 func skillChecker(description string) skills {
     return skills{
         React: strings.Contains(description, "React"),
         Python: strings.Contains(description, "Python"),
-        Golang: strings.Contains(description, "Go"),
+        Golang: strings.Contains(description, "Golang"),
         Svelte: strings.Contains(description, "Svelte"),
         Nextjs: strings.Contains(description, "Next.js"),
         Typescript: strings.Contains(description, "TypeScript"),
+        Tailwind: strings.Contains(description, "Tailwind"),
     }
 }
 
-func fetchData() error {
+// Converts job struct to json
+func jobsToJson(file *os.File, jobs []job, fName string) {
+    // Encode jobs slice to JSON
+    encoder := json.NewEncoder(file)
+    encoder.SetIndent("", " ") // Pretty-print with indentation
+    if err := encoder.Encode(jobs); err != nil {
+        log.Fatalf("Cannot write to file %q: %s", fName, err)
+    }
 
-    baseUrl := "https://thehub.io"
-    // Instantiate default collector
-    c := colly.NewCollector(
-        // visit only the hub
-        colly.AllowedDomains("www.thehub.io", "thehub.io"),
-
-        // Cache responses to prevent multiple requests
-        colly.CacheDir("./tmp"),
-    )
-
-    // Slice of excluded words in the job titles
-    excluded := []string{"senior", "lead"}
-    // Instantiate a new collector to visit the job details page
-    detailsCollector := c.Clone()
-
-    // Limit the number of jobs to fetch
-    jobCount := 0
-
-    // On every <div> element with class "card__content attribute call callback
-    c.OnHTML("div[class=card__content]", func(e *colly.HTMLElement) {
-        // Return if the job limit has been reached
-        if jobCount >= jobLimit {
-            return
-        }
-        // Get the title and ensure it doesn't contain any excluded words
+    fmt.Println("Job details successfully written to", fName)
+}
+
+func checkIfPaid(description string) {
+    for _, keyword := range unpaidKeywords {
+        if strings.Contains(strings.ToLower(description), keyword) {
+            return
+        }
+    }
+}
+
+func checkIfStudent(description string) string {
+    for _, keyword := range studentKeywords {
+        if strings.Contains(strings.ToLower(description), keyword) {
+            return "student"
+        }
+    }
+    return "full time"
+}
+
+// Slice to store job details
+var (
+    excluded = []string{"senior", "lead", "founder", "cto", "vp of", "erfaren", "arkitekt", "architect", "manager", "ulønnet", "unpaid", "praktik", "cyber", "leder", "sikkerhed", "supporter", "sr."}
+    unpaidKeywords = []string{"unpaid", "praktik", "ulønnet"}
+    studentKeywords = []string{"studerende", "studenter", "student", "medhjælper"}
+)
+
+func scrapeHub() {
+    var (
+        jobs []job
+        jobCount int
+        fName = "/app/data/thehub.json"
+        maxJobs = 20
+        baseUrl = "https://thehub.io"
+        searchString = "https://thehub.io/jobs?roles=frontenddeveloper&roles=fullstackdeveloper&roles=backenddeveloper&roles=devops&paid=true&countryCode=DK&sorting=newJobs"
+    )
+
+    // Create file after scraping is complete
+    c := colly.NewCollector(
+        colly.AllowedDomains("www.thehub.io", "thehub.io"),
+    )
+
+    detailsCollector := colly.NewCollector(
+        colly.AllowedDomains("www.thehub.io", "thehub.io"),
+        colly.CacheDir("/app/data/thehub_cache"),
+    )
+
+    c.OnHTML("div[class=card__content]", func(e *colly.HTMLElement) {
+        if jobCount >= maxJobs {
+            return
+        }
+
         title := e.ChildText("span.card-job-find-list__position")
         for _, excludedWord := range excluded {
             if strings.Contains(strings.ToLower(title), excludedWord) {
@@ -91,66 +126,189 @@ func fetchData() error {
         fmt.Println("Visiting", r.URL.String())
     })
 
-    detailsCollector.OnHTML("div.view-job-details", func(e *colly.HTMLElement) {
-        if jobCount >= jobLimit {
+    detailsCollector.OnHTML("div[class='view-job-details']", func(e *colly.HTMLElement) {
+        if jobCount >= maxJobs {
             return
         }
-        // Get logo and trim the url
+
         logo := e.ChildAttr("div.media-item__image", "style")
         cutLeft := "background-image:url("
         cutRight := ");"
         trimmedLogo := strings.Trim(logo, cutLeft+cutRight)
 
-        // Get company name
+        descriptionHTML, err := e.DOM.Find("content.text-block__content > span").Html()
+        if err != nil {
+            log.Printf("Error getting HTML of description: %s", err)
+            return
+        }
+
         jobDetails := job{
             Title: e.ChildText("h2[class=view-job-details__title]"),
             Logo: trimmedLogo,
             Company: e.ChildText(".bullet-inline-list > a:first-child"),
             Location: e.ChildText(".bullet-inline-list > a:nth-child(2)"),
             Type: e.ChildText(".bullet-inline-list > a:nth-child(3)"),
-            Description: e.ChildText("content.text-block__content > span"),
+            Description: descriptionHTML,
             Link: e.Request.URL.String(),
             Skills: skillChecker(e.ChildText("content.text-block__content > span")),
+            Scraped: time.Now().String(),
+            Source: baseUrl,
         }
         jobs = append(jobs, jobDetails)
         jobCount++
+        fmt.Printf("Scraped job %d from TheHub\n", jobCount)
     })
-    // Handle pagination
+
     c.OnHTML("a.page-link", func(e *colly.HTMLElement) {
+        if jobCount >= maxJobs {
+            return
+        }
         nextPage := e.Attr("href")
         if nextPage != "" {
             fullNextPage := baseUrl + nextPage
-            fmt.Println("Visiting next page:", fullNextPage)
             e.Request.Visit(fullNextPage)
         }
     })
-    // Visit the initial URL to start scraping
-    err := c.Visit("https://thehub.io/jobs?roles=frontenddeveloper&roles=fullstackdeveloper&roles=backenddeveloper&search=developer&paid=true&countryCode=DK&sorting=newJobs")
+
+    // Add error handling for the initial visit
+    err := c.Visit(searchString)
     if err != nil {
-        return err
+        log.Printf("Error visiting TheHub: %s", err)
+        return
+    }
+
+    // Wait for all collectors to finish
+    c.Wait()
+    detailsCollector.Wait()
+
+    // Write jobs to file after scraping is complete
+    if len(jobs) > 0 {
+        file, err := os.Create(fName)
+        if err != nil {
+            log.Printf("Cannot create file %q: %s", fName, err)
+            return
+        }
+        defer file.Close()
+
+        jobsToJson(file, jobs, fName)
+        fmt.Printf("Successfully scraped %d jobs from TheHub\n", len(jobs))
+    } else {
+        log.Println("No jobs were scraped from TheHub")
     }
-    return nil
 }
 
-func handler(ctx context.Context) ([]job, error) {
-    // Check if cache is valid
-    if time.Since(lastFetch) < cacheTTL && len(jobs) > 0 {
-        return jobs, nil
-    }
+func scrapeItJobBank() {
+    var (
+        jobs []job
+        jobCount int
+        fName = "/app/data/it-jobbank.json"
+        maxJobs = 20
+        baseUrl = "https://www.it-jobbank.dk"
+        searchString = "https://www.it-jobbank.dk/jobsoegning/udvikling"
+    )
 
-    // Fetch new data
-    err := fetchData()
+    c := colly.NewCollector(
+        colly.AllowedDomains("www.it-jobbank.dk", "it-jobbank.dk"),
+    )
+
+    detailsCollector := colly.NewCollector(
+        colly.AllowedDomains("www.it-jobbank.dk", "it-jobbank.dk"),
+        colly.CacheDir("/app/data/itjobbank_cache"),
+    )
+
+    c.OnHTML("div[class=result]", func(e *colly.HTMLElement) {
+        if jobCount >= maxJobs {
+            return
+        }
+
+        title := e.ChildText("h3.job-title > a")
+        for _, excludedWord := range excluded {
+            if strings.Contains(strings.ToLower(title), excludedWord) {
+                return
+            }
+        }
+        fullLink := e.ChildAttr("h3.job-title > a", "href")
+
+        detailsCollector.Visit(fullLink)
+    })
+
+    detailsCollector.OnRequest(func(r *colly.Request) {
+        fmt.Println("Visiting", r.URL.String())
+    })
+
+    detailsCollector.OnHTML("section > div", func(e *colly.HTMLElement) {
+        if jobCount >= maxJobs {
+            return
+        }
+
+        descriptionHTML, err := e.DOM.Find("div[id=job_ad]").Html()
+        if err != nil {
+            log.Printf("Error getting HTML of description: %s", err)
+            return
+        }
+
+        checkIfPaid(descriptionHTML)
+
+        title := e.ChildText("h1.title")
+        if title == "" {
+            title = e.ChildText("h1[id=jobtitle]")
+        }
+
+        jobDetails := job{
+            Title: title,
+            Logo: baseUrl + e.ChildAttr("div.company-logo > img", "src"),
+            Company: e.ChildText("p.published"),
+            Location: e.ChildText("div.job-location > p.caption"),
+            Type: checkIfStudent(descriptionHTML),
+            Description: descriptionHTML,
+            Link: e.Request.URL.String(),
+            Skills: skillChecker(descriptionHTML),
+            Scraped: time.Now().String(),
+            Source: baseUrl,
+        }
+        jobs = append(jobs, jobDetails)
+        jobCount++
+        fmt.Printf("Scraped job %d from IT JobBank\n", jobCount)
+    })
+
+    c.OnHTML("a.page-link", func(e *colly.HTMLElement) {
+        if jobCount >= maxJobs {
+            return
+        }
+        nextPage := e.Attr("href")
+        if nextPage != "" {
+            e.Request.Visit(nextPage)
+        }
+    })
+
+    // Add error handling for the initial visit
+    err := c.Visit(searchString)
     if err != nil {
-        return nil, err
+        log.Printf("Error visiting IT JobBank: %s", err)
+        return
     }
 
-    // Update cache timestamp
-    lastFetch = time.Now()
+    // Wait for all collectors to finish
+    c.Wait()
+    detailsCollector.Wait()
 
-    return jobs, nil
+    // Write jobs to file after scraping is complete
+    if len(jobs) > 0 {
+        file, err := os.Create(fName)
+        if err != nil {
+            log.Printf("Cannot create file %q: %s", fName, err)
+            return
+        }
+        defer file.Close()
+
+        jobsToJson(file, jobs, fName)
+        fmt.Printf("Successfully scraped %d jobs from IT JobBank\n", len(jobs))
+    } else {
+        log.Println("No jobs were scraped from IT JobBank")
+    }
 }
 
 func main() {
-    lambda.Start(handler)
+    scrapeHub()
+    scrapeItJobBank()
 }
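Both scrapeHub and scrapeItJobBank marshal their []job slice to a pretty-printed JSON file via jobsToJson. As a small, hypothetical read-back sketch of what a consumer of /app/data/thehub.json might look like: the struct below is a trimmed mirror of the job type in this diff, and the JSON tags for Title and Company are assumed since those fields sit above the changed hunk.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)

// Trimmed mirror of the scraper's job struct; only the fields needed here.
// Tags for Title and Company are assumed; link and source appear in the diff above.
type job struct {
	Title   string `json:"title"`
	Company string `json:"company"`
	Link    string `json:"link"`
	Source  string `json:"source"`
}

func main() {
	// Path taken from the fName variable in scrapeHub.
	f, err := os.Open("/app/data/thehub.json")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Decode the array written by jobsToJson and print a short summary per job.
	var jobs []job
	if err := json.NewDecoder(f).Decode(&jobs); err != nil {
		log.Fatal(err)
	}
	for _, j := range jobs {
		fmt.Printf("%s at %s -> %s\n", j.Title, j.Company, j.Link)
	}
}
```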
19 readme.md
@@ -1,13 +1,18 @@
-# The Hub Scraper
+# IT jobs scraper
 
-deprecated as lambda was a bad solution for this, without setting up dynamodb, api etc. This will go live in a ec2 so it can write to local storage instead of running on demand.
+This is a simple scraper that extracts job details from the [The Hub](https://thehub.io) website and itjobbank.
 
-Go is fast but free tier lambda is not and I am not yet a smart man.
-
-This is a simple scraper that extracts job details from the [The Hub](https://thehub.io) website.
+## Filtering
 
-It's a fork of the original [The Hub Scraper](https://gitea.rannes.dev/rannes.dev/sw-jobs-go) by [Rannes](https://gitea.rannes.dev/rannes.dev).
+The scraper filters out a list of keywords like senior, architect etc., as I wrote it for entry and mid level roles. It also filters out unpaid roles from The Hub, and keyword-based ones from itjobbank.
 
 ## Usage
 
-To run the scraper zip it, deploy it to AWS Lambda and then call the function.
+To run the scraper, simply execute the following command:
+
+```bash
+go run scraper.go
+```
+
+The scraper will create a `thehub.json` and `itjobbank.json` file in the current directory, which contains a list of job details in JSON format. It caches the pages, so it is very light on resources and requests.
3 run-scrapers.sh (Normal file)
@@ -0,0 +1,3 @@
+#!/bin/bash
+cd /home/admin/sw-jobs-go
+go run go-scraper >> scraper.log 2>&1
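run-scrapers.sh changes into the checkout and appends the scraper's output to scraper.log, which suggests it is meant to be invoked on a schedule rather than run as a long-lived service. A hypothetical crontab entry for that; the interval is an assumption and only the path matches the cd in the script:

```bash
# Run the scrapers every 6 hours; the script handles its own logging.
0 */6 * * * /home/admin/sw-jobs-go/run-scrapers.sh
```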