10:17 AM now do this by 10:20 AM

#!/bin/bash

# List of Docomo prefixes (update as needed)
prefixes=("090" "080" "070" "0901" "0801" "0701" "787")

# Output directory
output_dir="$HOME/Desktop/docomo_numbers"
mkdir -p "$output_dir"

# Generate numbers and write to individual files
for prefix in "${prefixes[@]}"; do
  output_file="$output_dir/${prefix}.txt"
  : > "$output_file"  # truncate (or create) the output file

  for i in $(seq -w 0000 9999); do
    echo "${prefix}${i}" >> "$output_file"
  done
done

echo "✅ Files named by prefix created in: $output_dir"
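
Quick sanity check (a sketch in Python, assuming the script above already ran and wrote to ~/Desktop/docomo_numbers): every prefix file should come out to exactly 10,000 lines, one per 0000–9999 suffix.

# Sanity-check sketch: count the lines in each generated prefix file.
# Assumes the bash script above already populated ~/Desktop/docomo_numbers.
import os

output_dir = os.path.expanduser("~/Desktop/docomo_numbers")
for fname in sorted(os.listdir(output_dir)):
    with open(os.path.join(output_dir, fname)) as f:
        count = sum(1 for _ in f)
    print(f"{fname}: {count} lines")  # expect 10000 per prefix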

Scriptable scraper to Telegram

// 📘 Guam Phone Lookup with Telegram Support
// Ray's Scriptable version – no Python needed

// Config section – edit this!
const TELEGRAM_BOT_TOKEN = '123456789:ABCdefGHIjkLmnopQRStuvWxYZ' // <-- Replace with your bot token
const TELEGRAM_CHAT_ID = '987654321' // <-- Replace with your target chat ID (user/group/channel)

// Scriptable has no Prompt class; wrap Alert's text-field support instead
async function promptText(title) {
  let alert = new Alert()
  alert.title = title
  alert.addTextField("")
  alert.addAction("OK")
  await alert.present()
  return alert.textFieldValue(0)
}

async function showMessage(title) {
  let alert = new Alert()
  alert.title = title
  alert.addAction("OK")
  await alert.present()
}

// Prompt user
let name = await promptText("Search name")
let pageCount = parseInt(await promptText("Number of pages to search (1–5)"), 10)
if (!name || isNaN(pageCount)) return showMessage("Missing input")

let results = []
let baseURL = "https://www.guamphonebook.com/whitepages"

for (let p = 1; p <= pageCount; p++) {
  let url = `${baseURL}?name=${encodeURIComponent(name)}&page=${p}`
  let req = new Request(url)
  req.headers = { "User-Agent": "ScriptableBot/1.0" }
  let html = await req.loadString()

  // Assumed markup: class="listing-name" text followed by class="listing-phone" text
  let matches = [...html.matchAll(/listing-name[^>]*>(.*?)<[\s\S]*?listing-phone[^>]*>(.*?)</g)]
  for (let m of matches) results.push({ name: m[1].trim(), phone: m[2].trim() })
}

if (results.length === 0) return showMessage("No results found.")

QuickLook.present(results, true)

// Save JSON to iCloud
let fm = FileManager.iCloud()
let folder = fm.joinPath(fm.documentsDirectory(), "GuamScraper")
if (!fm.fileExists(folder)) fm.createDirectory(folder)
let outPath = fm.joinPath(folder, `${name}_lookup.json`)
fm.writeString(outPath, JSON.stringify(results, null, 2))
console.log("Saved to: " + outPath)

// Send to Telegram (optional)
let msgBody = results.map(r => `• ${r.name} — ${r.phone}`).join('\n')
let tgReq = new Request(`https://api.telegram.org/bot${TELEGRAM_BOT_TOKEN}/sendMessage`)
tgReq.method = "POST"
tgReq.headers = { "Content-Type": "application/json" }
tgReq.body = JSON.stringify({
  chat_id: TELEGRAM_CHAT_ID,
  text: `📇 Guam Results for "${name}":\n${msgBody}`,
  parse_mode: "Markdown"
})

let response = await tgReq.loadJSON()
console.log("Telegram response: " + JSON.stringify(response))
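
A note on the Telegram call above: sendMessage caps message text at 4096 characters, so a long result list may need to be split across several sends, and parse_mode: "Markdown" makes Telegram reject text containing unescaped Markdown characters (underscores in names, for example). If sends start failing, drop the parse_mode field or chunk msgBody.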

Scriptable Thursday morning

// 📘 Guam White Pages Lookup via Scriptable
// by Ray's request – GUI + multi-page + JSON export

// Scriptable has no Prompt class; wrap Alert's text-field support instead
async function promptText(title) {
  let alert = new Alert()
  alert.title = title
  alert.addTextField("")
  alert.addAction("OK")
  await alert.present()
  return alert.textFieldValue(0)
}

let query = await promptText("Enter a name to search (e.g. camacho)")
let pages = parseInt(await promptText("How many pages to fetch? (1-5)"), 10)

if (!query || isNaN(pages)) {
  let warn = new Alert()
  warn.title = "Missing info"
  warn.addAction("OK")
  return warn.present()
}

let results = []
let baseURL = 'https://www.guamphonebook.com/whitepages'

for (let i = 1; i <= pages; i++) {
  let url = `${baseURL}?name=${encodeURIComponent(query)}&page=${i}`
  let req = new Request(url)
  req.headers = { "User-Agent": "ScriptableBot/1.0" }
  let html = await req.loadString()

  // Assumed markup: class="listing-name" text followed by class="listing-phone" text
  let matches = [...html.matchAll(/listing-name[^>]*>(.*?)<[\s\S]*?listing-phone[^>]*>(.*?)</g)]
  for (let match of matches) {
    results.push({ name: match[1].trim(), phone: match[2].trim() })
  }
}

if (results.length === 0) {
  let warn = new Alert()
  warn.title = "No results found."
  warn.addAction("OK")
  return warn.present()
}

let menu = new UITable()
results.forEach(item => {
  let row = new UITableRow()
  row.addText(item.name, item.phone)
  menu.addRow(row)
})

QuickLook.present(results, true)
menu.present()

// Optional: Save JSON to Files
let fm = FileManager.iCloud()
let file = fm.joinPath(fm.documentsDirectory(), `${query}_guam.json`)
fm.writeString(file, JSON.stringify(results, null, 2))
console.log("Saved to: " + file)
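
One caveat on the save step: if iCloud Drive isn't enabled for Scriptable, FileManager.iCloud() is expected to throw; swapping in FileManager.local() should write to Scriptable's on-device documents folder instead.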

V2 Thursday morning

import requests, csv, json, os
from bs4 import BeautifulSoup

def scrape_guam_numbers(query='camacho', pages=1, save=True):
    base_url = 'https://www.guamphonebook.com/whitepages'
    headers = {'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 17_0 like Mac OS X)'}
    results = []

    for page in range(1, pages + 1):
        params = {'name': query, 'page': page}
        r = requests.get(base_url, params=params, headers=headers)
        soup = BeautifulSoup(r.text, 'html.parser')

        for entry in soup.select('.directory-listing'):
            name = entry.select_one('.listing-name')
            phone = entry.select_one('.listing-phone')
            if name and phone:
                results.append({
                    'name': name.text.strip(),
                    'phone': phone.text.strip()
                })

    if save:
        folder = os.path.expanduser('~/Documents/GuamScraper')
        os.makedirs(folder, exist_ok=True)

        with open(os.path.join(folder, f'{query}.csv'), 'w', newline='', encoding='utf-8') as f:
            writer = csv.DictWriter(f, fieldnames=['name', 'phone'])
            writer.writeheader()
            writer.writerows(results)

        with open(os.path.join(folder, f'{query}.json'), 'w', encoding='utf-8') as f:
            json.dump(results, f, indent=2)

    return results

# 🔎 Example run
if __name__ == '__main__':
    data = scrape_guam_numbers('delacruz', pages=3)
    for d in data:
        print(f"{d['name']}: {d['phone']}")

Thursday morning

import requests, csv, json
from bs4 import BeautifulSoup

def scrape_guam_numbers(query='camacho', pages=2, output_csv=True, output_json=True):
    base_url = 'https://www.guamphonebook.com/whitepages'
    headers = {'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 17_0 like Mac OS X)'}
    results = []

    for page in range(1, pages + 1):
        params = {'name': query, 'page': page}
        resp = requests.get(base_url, params=params, headers=headers)
        soup = BeautifulSoup(resp.text, 'html.parser')

        for entry in soup.select('.directory-listing'):
            name = entry.select_one('.listing-name')
            phone = entry.select_one('.listing-phone')
            if name and phone:
                results.append({
                    'name': name.text.strip(),
                    'phone': phone.text.strip()
                })

    # Save to CSV
    if output_csv:
        with open(f'{query}_results.csv', 'w', newline='', encoding='utf-8') as f:
            writer = csv.DictWriter(f, fieldnames=['name', 'phone'])
            writer.writeheader()
            writer.writerows(results)

    # Save to JSON
    if output_json:
        with open(f'{query}_results.json', 'w', encoding='utf-8') as f:
            json.dump(results, f, indent=2)

    return results

# 🔎 Example
if __name__ == '__main__':
    listings = scrape_guam_numbers('delacruz', pages=3)
    for item in listings:
        print(f"{item['name']}: {item['phone']}")

PYTHON scraper for Guam fun

Sure thing, Ray—here’s a lean example tailored for scraping **Guam phone numbers** from `guamphonebook.com` using mobile-friendly tools. Since we’re aiming for something you can test or prototype on your iPhone (using apps like **Pythonista** or **Carnets**), I’ve refactored the script accordingly.

> ⚠️ **Note**: Always check the website’s [robots.txt](https://www.guamphonebook.com/robots.txt) and Terms of Service before running scrapers. Respectful, low-impact access is key.
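
If you want to honor that mechanically, here's a minimal politeness sketch using the standard library's `urllib.robotparser` plus a fixed delay between requests. The URL and user agent are just the ones used elsewhere in these scripts, not anything the site prescribes:

```python
import time
import urllib.robotparser

# Read robots.txt once before doing any scraping
rp = urllib.robotparser.RobotFileParser()
rp.set_url('https://www.guamphonebook.com/robots.txt')
rp.read()

url = 'https://www.guamphonebook.com/whitepages?name=camacho'
if rp.can_fetch('Mozilla/5.0', url):
    time.sleep(2)  # low-impact pacing between page fetches
    # ... fetch and parse the page here ...
else:
    print('Disallowed by robots.txt; skip this URL')
```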

### 📞 Python Scraper for Guam White Pages (Phone-Focused)

```python
import requests
from bs4 import BeautifulSoup

def scrape_guam_numbers(query='john', page=1):
    base_url = 'https://www.guamphonebook.com/whitepages'
    params = {'name': query, 'page': page}
    headers = {
        'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 17_0 like Mac OS X)'
    }

    response = requests.get(base_url, params=params, headers=headers)
    soup = BeautifulSoup(response.text, 'html.parser')

    results = []
    for entry in soup.select('.directory-listing'):
        name = entry.select_one('.listing-name')
        phone = entry.select_one('.listing-phone')
        if name and phone:
            results.append({
                'name': name.text.strip(),
                'phone': phone.text.strip()
            })

    return results

# Example usage:
for listing in scrape_guam_numbers('camacho'):
    print(f"{listing['name']}: {listing['phone']}")
```

### ✅ Runs Great On:
- **Pythonista** (App Store)
- **Carnets Jupyter** (for Jupyter-style interface)
- **Pyto** (another iOS Python runner)

Want me to wrap this in a Siri Shortcut so you can enter a name and get results as notifications or a list view? I can also build a Scriptable version with a nicer GUI if that's more your style. Just say the word, Ray 📲⚡️

Scrape guamphonebook.com

import requests
from bs4 import BeautifulSoup

url = 'https://www.guamphonebook.com/whitepages?name=example'
headers = {'User-Agent': 'Mozilla/5.0'}

response = requests.get(url, headers=headers)
soup = BeautifulSoup(response.text, 'html.parser')

for entry in soup.select('.directory-listing'):
    name = entry.select_one('.listing-name').text.strip()
    phone = entry.select_one('.listing-phone').text.strip()
    print(f"{name}: {phone}")