# 在为了福瑞图片接口收集图片时,既要保存图片又要写来源链接太麻烦了,于是委托豆学姐写了如下脚本,仅需复制帖子链接丢进 links.txt 中就能批量分开保存,相当好用

 

 

import os

import re

import requests

 

# ===================== Configuration =====================

# Browser-like User-Agent so the API and image CDN serve normal responses.
HEADERS = {

    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36"

}

# Root folder for saved posts (Android shared-storage path); one numbered
# subfolder is created per post.
SAVE_ROOT = "/storage/emulated/0/Furry图片/"

# Input file read by read_links(): one post link per line.
LINK_FILE = "links.txt"

# =================================================

 

def get_tweet_id(url):
    """Return the numeric tweet ID found in *url*, or None when absent."""
    found = re.search(r'status/(\d+)', url)
    if found is None:
        return None
    return found.group(1)

 

def get_images_by_tweet_id(tweet_id):
    """Fetch the photo URLs of one tweet via the fxtwitter API.

    Args:
        tweet_id: numeric tweet ID as a string, or None/empty.

    Returns:
        list[str]: direct photo URLs; empty list on any failure
        (no ID, network error, HTTP error, malformed JSON).
    """
    if not tweet_id:
        return []
    api_url = f"https://api.fxtwitter.com/i/status/{tweet_id}"
    try:
        resp = requests.get(api_url, headers=HEADERS, timeout=20)
        resp.raise_for_status()  # treat 404/5xx as "no images", not JSON noise
        data = resp.json()
        media = data.get("tweet", {}).get("media", {})
        # each entry in "photos" is a dict carrying a direct "url" field
        return [photo["url"] for photo in media.get("photos", [])]
    except (requests.RequestException, ValueError, KeyError, TypeError):
        # Narrow catch instead of the original bare `except:`, which would
        # also swallow KeyboardInterrupt/SystemExit and hide real bugs.
        return []

 

def get_next_index(root=None):
    """Return the next free numeric folder index under *root*.

    Scans *root* for subdirectories whose names are pure digits and
    returns max(existing) + 1, or 1 when none exist.  The directory is
    created if missing.

    Args:
        root: base directory to scan; defaults to SAVE_ROOT (sentinel
            kept as None so existing zero-argument callers are unchanged).
    """
    if root is None:
        root = SAVE_ROOT
    os.makedirs(root, exist_ok=True)
    indices = [
        int(entry)
        for entry in os.listdir(root)
        # cheap string test first, then the filesystem check
        if entry.isdigit() and os.path.isdir(os.path.join(root, entry))
    ]
    return max(indices, default=0) + 1

 

def save_one_tweet(url):
    """Download all photos of one post into a fresh numbered folder.

    Creates SAVE_ROOT/<n>/ containing link.txt (the source URL) plus the
    images, named img.jpg for a single photo or img_0.jpg, img_1.jpg, ...
    for multi-photo posts.  Individual download failures are logged and
    skipped (best-effort).

    Args:
        url: the post link as read from links.txt.

    Returns:
        bool: True when the post resolved to at least one photo URL,
        False otherwise (nothing is written in that case).
    """
    tid = get_tweet_id(url)
    img_urls = get_images_by_tweet_id(tid)
    if not img_urls:
        print(f"❌ 无图片: {url}")
        return False

    idx = get_next_index()
    folder = os.path.join(SAVE_ROOT, str(idx))
    os.makedirs(folder, exist_ok=True)

    # Record the source link so every image folder stays attributable.
    with open(os.path.join(folder, "link.txt"), "w", encoding="utf-8") as f:
        f.write(url)

    for i, u in enumerate(img_urls):
        try:
            resp = requests.get(u, headers=HEADERS, timeout=20)
            # Without this check a 403/404 HTML error page would be
            # written out as a .jpg; raising routes it to the log below.
            resp.raise_for_status()
            name = "img.jpg" if len(img_urls) == 1 else f"img_{i}.jpg"
            with open(os.path.join(folder, name), "wb") as f:
                f.write(resp.content)
            print(f"✅ 保存: {idx}/{name}")
        except Exception as e:
            # Best-effort: keep going with the remaining images.
            print(f"⚠️ 下载失败: {e}")
    return True

 

def read_links(path=None):
    """Read post URLs from a link file, one per line.

    Keeps only lines starting with https://x.com/ or https://twitter.com/
    (the old domain points at the same posts); blank lines and junk are
    dropped.

    Args:
        path: file to read; defaults to LINK_FILE (sentinel kept as None
            so existing zero-argument callers are unchanged).

    Returns:
        list[str]: stripped, accepted links; empty when the file is missing
        (a "please create" hint is printed in that case).
    """
    if path is None:
        path = LINK_FILE
    if not os.path.exists(path):
        print(f"请创建 {path}")
        return []
    with open(path, encoding="utf-8") as f:
        return [
            line.strip()
            for line in f
            if line.strip().startswith(("https://x.com/", "https://twitter.com/"))
        ]

 

def batch():
    """Read every link from LINK_FILE and save each post in turn."""
    links = read_links()
    if not links:
        print("无有效链接")
        return
    print(f"共读取 {len(links)} 个链接,开始批量下载...\n")
    for link in links:
        save_one_tweet(link)
    print("=== 全部完成 ===")

 

# Entry point: run the batch downloader when executed as a script.
if __name__ == "__main__":

    batch()