Go
Go code examples for using Residential Proxy with the net/http package.
Practical Go examples for integrating Residential Proxy into your scraping applications.
1. Basic HTTP Request
This example demonstrates the fundamental setup for using Residential Proxy with Go's net/http package. It configures a static proxy session and makes a simple GET request.
package main

import (
    "fmt"
    "io"
    "net/http"
    "net/url"
)

func main() {
    // Static proxy
    proxyURL, _ := url.Parse("http://user-country-us-sessid-go1:pass123@network.mrproxy.com:10000")

    client := &http.Client{
        Transport: &http.Transport{
            Proxy: http.ProxyURL(proxyURL),
        },
    }

    resp, err := client.Get("https://api.ipify.org")
    if err != nil {
        fmt.Println("Error:", err)
        return
    }
    defer resp.Body.Close()

    body, _ := io.ReadAll(resp.Body)
    fmt.Println("Your IP:", string(body))
}

Use Case: Perfect for simple proxy testing, IP verification, and single-request scenarios where you need a consistent IP address through a static session.
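If you prefer not to hardcode credentials, the same setup also works with the proxy URL assembled from environment variables. A minimal sketch, assuming hypothetical variable names PROXY_USER and PROXY_PASS; the host and port match the example above:

package main

import (
    "fmt"
    "net/http"
    "net/url"
    "os"
)

func main() {
    // PROXY_USER and PROXY_PASS are illustrative variable names, not part of the product
    user := os.Getenv("PROXY_USER") // e.g. "user-country-us-sessid-go1"
    pass := os.Getenv("PROXY_PASS")

    proxyURL := &url.URL{
        Scheme: "http",
        User:   url.UserPassword(user, pass),
        Host:   "network.mrproxy.com:10000",
    }

    client := &http.Client{
        Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)},
    }

    resp, err := client.Get("https://api.ipify.org")
    if err != nil {
        fmt.Println("Error:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println("Status:", resp.StatusCode)
}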
2. Rotating Proxy for Scraping
Shows how to create a reusable scraping function that uses rotating proxies (no session ID). Each call gets a fresh IP address, ideal for avoiding rate limits.
package main

import (
    "fmt"
    "io"
    "net/http"
    "net/url"
)

func scrapeWithProxy(targetURL string) error {
    // Rotating proxy - new IP per request
    proxyURL, _ := url.Parse("http://user-country-uk:pass123@network.mrproxy.com:10000")

    client := &http.Client{
        Transport: &http.Transport{
            Proxy: http.ProxyURL(proxyURL),
        },
    }

    resp, err := client.Get(targetURL)
    if err != nil {
        return fmt.Errorf("failed to fetch %s: %w", targetURL, err)
    }
    defer resp.Body.Close()

    body, _ := io.ReadAll(resp.Body)
    fmt.Printf("Scraped %s: %d bytes (status: %d)\n", targetURL, len(body), resp.StatusCode)
    return nil
}

func main() {
    urls := []string{
        "https://example.com/page1",
        "https://example.com/page2",
        "https://example.com/page3",
    }

    // Named targetURL so the loop variable does not shadow the net/url package
    for _, targetURL := range urls {
        if err := scrapeWithProxy(targetURL); err != nil {
            fmt.Println("Error:", err)
        }
    }
}

Use Case: High-volume scraping where IP diversity is crucial for avoiding detection and rate limiting. Each page request appears to come from a different visitor.
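To confirm that the rotating endpoint really returns a different IP per call, you can send a few requests through the same client to an IP echo service and compare the output. A small sketch reusing api.ipify.org from the first example:

package main

import (
    "fmt"
    "io"
    "net/http"
    "net/url"
)

func main() {
    // Rotating proxy: no sessid parameter, so each request gets a fresh IP
    proxyURL, _ := url.Parse("http://user-country-uk:pass123@network.mrproxy.com:10000")
    client := &http.Client{
        Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)},
    }

    for i := 0; i < 3; i++ {
        resp, err := client.Get("https://api.ipify.org")
        if err != nil {
            fmt.Println("Error:", err)
            continue
        }
        ip, _ := io.ReadAll(resp.Body)
        resp.Body.Close()
        fmt.Printf("Request %d exit IP: %s\n", i+1, ip)
    }
}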
3. Concurrent Scraping
Demonstrates parallel scraping using goroutines with different static proxy sessions. Each goroutine maintains its own consistent IP address for the duration of its work.
package main

import (
    "fmt"
    "io"
    "net/http"
    "net/url"
    "sync"
)

func scrapeWithProxy(targetURL string, proxyAuth string, wg *sync.WaitGroup) {
    defer wg.Done()

    proxyURL, _ := url.Parse(fmt.Sprintf("http://%s@network.mrproxy.com:10000", proxyAuth))

    client := &http.Client{
        Transport: &http.Transport{
            Proxy: http.ProxyURL(proxyURL),
        },
    }

    resp, err := client.Get(targetURL)
    if err != nil {
        fmt.Printf("Error scraping %s: %v\n", targetURL, err)
        return
    }
    defer resp.Body.Close()

    body, _ := io.ReadAll(resp.Body)
    fmt.Printf("Scraped %s: %d bytes\n", targetURL, len(body))
}

func main() {
    urls := []string{
        "https://example.com/page1",
        "https://example.com/page2",
        "https://example.com/page3",
    }

    var wg sync.WaitGroup
    for i, targetURL := range urls {
        wg.Add(1)
        proxyAuth := fmt.Sprintf("user-country-fr-sessid-worker%d:pass123", i+1)
        go scrapeWithProxy(targetURL, proxyAuth, &wg)
    }
    wg.Wait()

    fmt.Println("All scraping completed")
}

Use Case: High-performance scraping that needs to process multiple URLs quickly while maintaining separate identities. Great for scraping sites with per-IP rate limits.
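Launching one goroutine per URL works for a handful of pages, but larger URL lists usually need a cap on how many requests run at once. A minimal sketch that bounds concurrency with a buffered channel used as a semaphore; the limit of 2 workers is an arbitrary illustration:

package main

import (
    "fmt"
    "net/http"
    "net/url"
    "sync"
)

func main() {
    urls := []string{
        "https://example.com/page1",
        "https://example.com/page2",
        "https://example.com/page3",
        "https://example.com/page4",
    }

    const maxWorkers = 2 // cap on simultaneous proxy sessions (illustrative value)
    sem := make(chan struct{}, maxWorkers)

    var wg sync.WaitGroup
    for i, targetURL := range urls {
        wg.Add(1)
        go func(i int, targetURL string) {
            defer wg.Done()
            sem <- struct{}{}        // acquire a slot
            defer func() { <-sem }() // release it when done

            auth := fmt.Sprintf("user-country-fr-sessid-worker%d:pass123", i+1)
            proxyURL, _ := url.Parse(fmt.Sprintf("http://%s@network.mrproxy.com:10000", auth))
            client := &http.Client{Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)}}

            resp, err := client.Get(targetURL)
            if err != nil {
                fmt.Printf("Error scraping %s: %v\n", targetURL, err)
                return
            }
            resp.Body.Close()
            fmt.Printf("Scraped %s: status %d\n", targetURL, resp.StatusCode)
        }(i, targetURL)
    }
    wg.Wait()
}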
4. Error Handling & Retries
Implements robust error handling with exponential backoff retry logic. Essential for production scraping applications that need to handle network failures gracefully.
package main

import (
    "fmt"
    "io"
    "math"
    "net/http"
    "net/url"
    "time"
)

func scrapeWithRetry(targetURL string, maxRetries int) ([]byte, error) {
    proxyURL, _ := url.Parse("http://user-country-de:pass123@network.mrproxy.com:10000")

    client := &http.Client{
        Transport: &http.Transport{
            Proxy: http.ProxyURL(proxyURL),
        },
        Timeout: 30 * time.Second,
    }

    var lastErr error
    for attempt := 0; attempt < maxRetries; attempt++ {
        resp, err := client.Get(targetURL)
        if err != nil {
            lastErr = err
            backoff := time.Duration(math.Pow(2, float64(attempt))) * time.Second
            fmt.Printf("Attempt %d failed: %v. Retrying in %v...\n", attempt+1, err, backoff)
            time.Sleep(backoff)
            continue
        }

        if resp.StatusCode >= 200 && resp.StatusCode < 300 {
            body, _ := io.ReadAll(resp.Body)
            resp.Body.Close()
            return body, nil
        }

        // Close the body explicitly before retrying; a deferred Close inside
        // the loop would keep every response open until the function returns.
        resp.Body.Close()

        lastErr = fmt.Errorf("HTTP %d", resp.StatusCode)
        backoff := time.Duration(math.Pow(2, float64(attempt))) * time.Second
        fmt.Printf("Attempt %d returned %d. Retrying in %v...\n", attempt+1, resp.StatusCode, backoff)
        time.Sleep(backoff)
    }

    return nil, fmt.Errorf("failed after %d attempts: %w", maxRetries, lastErr)
}

func main() {
    body, err := scrapeWithRetry("https://example.com", 3)
    if err != nil {
        fmt.Println("Failed:", err)
    } else {
        fmt.Printf("Success: %d bytes\n", len(body))
    }
}

Use Case: Production-ready scraping that must handle temporary network issues, server errors, and proxy failures without crashing or losing data.
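A common refinement is to retry only on status codes that are likely to be transient, such as 429 and 5xx, and to add a little jitter so parallel workers do not retry in lockstep. A sketch of that idea; which codes count as retryable is a choice made here for illustration:

package main

import (
    "fmt"
    "math"
    "math/rand"
    "net/http"
    "net/url"
    "time"
)

// retryable reports whether an HTTP status is worth retrying:
// 429 and 5xx are treated as transient, other 4xx are not.
func retryable(status int) bool {
    return status == http.StatusTooManyRequests || status >= 500
}

func fetchStatus(client *http.Client, targetURL string, maxRetries int) (int, error) {
    var lastErr error
    for attempt := 0; attempt < maxRetries; attempt++ {
        resp, err := client.Get(targetURL)
        if err == nil {
            status := resp.StatusCode
            resp.Body.Close()
            if !retryable(status) {
                return status, nil
            }
            lastErr = fmt.Errorf("HTTP %d", status)
        } else {
            lastErr = err
        }

        // Exponential backoff plus random jitter to avoid synchronized retries
        backoff := time.Duration(math.Pow(2, float64(attempt)))*time.Second +
            time.Duration(rand.Intn(500))*time.Millisecond
        time.Sleep(backoff)
    }
    return 0, fmt.Errorf("giving up after %d attempts: %w", maxRetries, lastErr)
}

func main() {
    proxyURL, _ := url.Parse("http://user-country-de:pass123@network.mrproxy.com:10000")
    client := &http.Client{
        Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)},
        Timeout:   30 * time.Second,
    }

    status, err := fetchStatus(client, "https://example.com", 3)
    if err != nil {
        fmt.Println("Failed:", err)
        return
    }
    fmt.Println("Final status:", status)
}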
5. Custom Headers
Shows how to add realistic browser headers to your requests when using proxies. This helps avoid detection by making requests appear more legitimate.
package main

import (
    "fmt"
    "io"
    "net/http"
    "net/url"
)

func scrapeWithHeaders(targetURL string) error {
    proxyURL, _ := url.Parse("http://user-country-jp:pass123@network.mrproxy.com:10000")

    client := &http.Client{
        Transport: &http.Transport{
            Proxy: http.ProxyURL(proxyURL),
        },
    }

    req, _ := http.NewRequest("GET", targetURL, nil)

    // Add custom headers
    req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64)")
    req.Header.Set("Accept-Language", "ja-JP,ja;q=0.9")
    req.Header.Set("Accept", "text/html,application/xhtml+xml")

    resp, err := client.Do(req)
    if err != nil {
        return err
    }
    defer resp.Body.Close()

    body, _ := io.ReadAll(resp.Body)
    fmt.Printf("Scraped %d bytes with status %d\n", len(body), resp.StatusCode)
    return nil
}

func main() {
    if err := scrapeWithHeaders("https://example.com"); err != nil {
        fmt.Println("Error:", err)
    }
}

Use Case: Scraping sites that check for browser-like behavior or when you need to match your headers to your proxy's geographic location for consistency.
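If every request should carry the same baseline headers, you can wrap the proxy transport in a small custom http.RoundTripper instead of setting the headers on each request by hand. A minimal sketch; headerTransport is an illustrative helper type, not part of any library:

package main

import (
    "fmt"
    "net/http"
    "net/url"
)

// headerTransport wraps another RoundTripper and stamps default headers
// onto every outgoing request that does not already set them.
type headerTransport struct {
    base    http.RoundTripper
    headers map[string]string
}

func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
    // Clone so the caller's request is not mutated
    clone := req.Clone(req.Context())
    for k, v := range t.headers {
        if clone.Header.Get(k) == "" {
            clone.Header.Set(k, v)
        }
    }
    return t.base.RoundTrip(clone)
}

func main() {
    proxyURL, _ := url.Parse("http://user-country-jp:pass123@network.mrproxy.com:10000")

    client := &http.Client{
        Transport: &headerTransport{
            base: &http.Transport{Proxy: http.ProxyURL(proxyURL)},
            headers: map[string]string{
                "User-Agent":      "Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
                "Accept-Language": "ja-JP,ja;q=0.9",
            },
        },
    }

    resp, err := client.Get("https://example.com")
    if err != nil {
        fmt.Println("Error:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println("Status:", resp.StatusCode)
}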
6. Context with Timeout
Demonstrates using Go's context package for proper timeout handling and request cancellation. This prevents requests from hanging indefinitely.
package main

import (
    "context"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "time"
)

func scrapeWithContext(targetURL string) error {
    proxyURL, _ := url.Parse("http://user-country-ca-sessid-ctx1:pass123@network.mrproxy.com:10000")

    client := &http.Client{
        Transport: &http.Transport{
            Proxy: http.ProxyURL(proxyURL),
        },
    }

    // Create context with 30-second timeout
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    req, _ := http.NewRequestWithContext(ctx, "GET", targetURL, nil)

    resp, err := client.Do(req)
    if err != nil {
        return fmt.Errorf("request failed: %w", err)
    }
    defer resp.Body.Close()

    body, _ := io.ReadAll(resp.Body)
    fmt.Printf("Scraped %d bytes\n", len(body))
    return nil
}

func main() {
    if err := scrapeWithContext("https://example.com"); err != nil {
        fmt.Println("Error:", err)
    }
}

Use Case: Applications that need precise control over request timeouts and graceful cancellation, especially in concurrent scenarios or when dealing with unreliable target sites.
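The same pattern extends to batches: a parent context can place one deadline over an entire scraping run while each request derives its own, shorter timeout from it. A sketch with illustrative timeout values:

package main

import (
    "context"
    "fmt"
    "net/http"
    "net/url"
    "time"
)

func main() {
    proxyURL, _ := url.Parse("http://user-country-ca-sessid-ctx1:pass123@network.mrproxy.com:10000")
    client := &http.Client{
        Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)},
    }

    // One deadline for the whole batch: after 60 seconds every
    // remaining request is cancelled together.
    batchCtx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
    defer cancel()

    urls := []string{
        "https://example.com/page1",
        "https://example.com/page2",
    }
    for _, targetURL := range urls {
        // Each request also gets its own, shorter per-request timeout
        reqCtx, reqCancel := context.WithTimeout(batchCtx, 15*time.Second)
        req, _ := http.NewRequestWithContext(reqCtx, "GET", targetURL, nil)

        resp, err := client.Do(req)
        if err != nil {
            fmt.Printf("Error fetching %s: %v\n", targetURL, err)
            reqCancel()
            continue
        }
        resp.Body.Close()
        reqCancel()
        fmt.Printf("Fetched %s: status %d\n", targetURL, resp.StatusCode)
    }
}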
7. Rate Limiting
Implements respectful scraping with controlled request rates using Go's time.Ticker. This prevents overwhelming target servers while maintaining steady throughput.
package main

import (
    "fmt"
    "io"
    "net/http"
    "net/url"
    "time"
)

func scrapeWithRateLimit(urls []string, requestsPerMinute int) {
    proxyURL, _ := url.Parse("http://user-country-au:pass123@network.mrproxy.com:10000")

    client := &http.Client{
        Transport: &http.Transport{
            Proxy: http.ProxyURL(proxyURL),
        },
    }

    delay := time.Minute / time.Duration(requestsPerMinute)
    ticker := time.NewTicker(delay)
    defer ticker.Stop()

    for _, targetURL := range urls {
        <-ticker.C // Wait for next tick

        resp, err := client.Get(targetURL)
        if err != nil {
            fmt.Printf("Error scraping %s: %v\n", targetURL, err)
            continue
        }

        body, _ := io.ReadAll(resp.Body)
        resp.Body.Close()
        fmt.Printf("Scraped %s: %d bytes\n", targetURL, len(body))
    }
}

func main() {
    urls := []string{
        "https://example.com/page1",
        "https://example.com/page2",
        "https://example.com/page3",
    }

    scrapeWithRateLimit(urls, 20) // 20 requests per minute
}

Use Case: Ethical scraping that respects server resources and terms of service. Essential for long-running scraping operations that need to avoid being blocked.
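If you would rather not manage a ticker yourself, the golang.org/x/time/rate package offers a token-bucket limiter whose Wait method blocks until the next request is allowed (it is an external module, installed with go get golang.org/x/time/rate). A sketch of the same 20-requests-per-minute budget using that limiter:

package main

import (
    "context"
    "fmt"
    "net/http"
    "net/url"
    "time"

    "golang.org/x/time/rate"
)

func main() {
    proxyURL, _ := url.Parse("http://user-country-au:pass123@network.mrproxy.com:10000")
    client := &http.Client{
        Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)},
    }

    // 20 requests per minute, with a burst of at most one request
    limiter := rate.NewLimiter(rate.Every(time.Minute/20), 1)

    urls := []string{
        "https://example.com/page1",
        "https://example.com/page2",
        "https://example.com/page3",
    }
    for _, targetURL := range urls {
        // Wait blocks until the limiter allows the next request
        if err := limiter.Wait(context.Background()); err != nil {
            fmt.Println("Limiter error:", err)
            return
        }

        resp, err := client.Get(targetURL)
        if err != nil {
            fmt.Printf("Error scraping %s: %v\n", targetURL, err)
            continue
        }
        resp.Body.Close()
        fmt.Printf("Scraped %s: status %d\n", targetURL, resp.StatusCode)
    }
}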