心情随笔1 分钟阅读
V2ray
数据来源youtube 博主顺丰资源,手动不定时更新
V2ray 订阅地址:zjy.serv00.net/index.txt
实现脚本:
安装库
pip install requests beautifulsoup4 gdown google-api-python-client

Python 代码:
import requests
from bs4 import BeautifulSoup
import re
import gdown
import datetime
from googleapiclient.discovery import build
# Your YouTube Data API key (placeholder — replace with a real key).
API_KEY = 'API_KEY'
# Build the YouTube Data API v3 client used by the helpers below.
youtube = build('youtube', 'v3', developerKey=API_KEY)
# ID of the blogger's channel whose newest video description is scraped.
channel_id = 'UCOQ5AdvDNOfyEAJY5SDXVZg' # e.g. a YouTube channel ID
def get_v2ray_url(url):
    """Scrape a blog page and return the V2ray subscription link it embeds.

    If *url* is None, the latest post is first discovered from the fallback
    blog's post list; otherwise *url* is scraped directly.  The returned
    value is the last ``http...`` link that follows the literal text
    "V2ray" in the page HTML.

    Raises requests.HTTPError on a non-2xx response and IndexError when no
    post link / V2ray link can be found.
    """
    # Browser-like headers; the blog rejects requests without a realistic UA.
    headers = {
        "accept": "*/*",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "priority": "u=1, i",
        "referer": "https://skill-note.blogspot.com/2023/06/bloggerfloat_11.html",
        "sec-ch-ua": '"Chromium";v="134", "Not:A-Brand";v="24", "Microsoft Edge";v="134"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "Windows",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36 Edg/134.0.0.0",
        "x-requested-with": "XMLHttpRequest",
        "Accept-Encoding": "gzip, deflate, br",
        "Connection": "keep-alive"
    }
    if url is None:
        # Discover the newest post from the fallback blog's index page.
        url = "https://skill-note.blogspot.com/2023/06/bloggerfloat_8.html"
        # Fix: use requests.get with a timeout (original requests.request
        # call sent a pointless empty body and could hang indefinitely).
        response = requests.get(url, headers=headers, timeout=30)
        response.raise_for_status()  # fail loudly instead of parsing an error page
        soup = BeautifulSoup(response.text, 'html.parser')
        # Last post-image-link is the most recent post; IndexError if none.
        popular_section = soup.find_all('a', class_='post-image-link')[-1]
        print(popular_section)
        href = popular_section['href']
        print(href)
    else:
        href = url
    response_v2ray = requests.get(href, headers=headers, timeout=30)
    response_v2ray.raise_for_status()
    # Take the last http(s) link that appears right after the text "V2ray".
    v2ray_url = re.findall(r'V2ray.*?(http.*?)<', response_v2ray.text)[-1]
    print(v2ray_url)
    return v2ray_url
def get_latest_video_description(channel_id):
    """Return metadata for the channel's most recent video.

    Queries the YouTube Data API for the newest upload on *channel_id* and
    returns a dict with keys ``title``, ``description`` and ``published_at``,
    or None when the channel has no videos or the snippet lookup is empty.
    """
    # Newest-first search, limited to a single hit.
    found = youtube.search().list(
        part='id',
        channelId=channel_id,
        order='date',
        maxResults=1,
    ).execute()
    hits = found.get('items')
    if not hits:
        return None

    newest_id = hits[0]['id']['videoId']

    # Fetch the snippet (title, description, timestamps) of that video.
    details = youtube.videos().list(part='snippet', id=newest_id).execute()
    snippets = details.get('items')
    if not snippets:
        return None

    meta = snippets[0]['snippet']
    return {
        'title': meta['title'],
        'description': meta['description'],
        'published_at': meta['publishedAt'],
    }
# 获取并打印最新视频简介
def get_new_url():
    """Extract the free-node page URL from the channel's newest video.

    Returns the last ``...html`` URL found after the fixed Chinese label in
    the video description, or None when there is no new video or the
    description contains no recognizable link.
    """
    latest_video = get_latest_video_description(channel_id)
    if not latest_video:
        print("未找到最新视频")
        return None
    print(f"视频标题: {latest_video['title']}")
    # print(f"发布时间: {latest_video['published_at']}")
    # print(f"视频简介:\n{latest_video['description']}")
    # The description embeds the page URL right after this fixed label.
    v_url = re.findall(r'本期免费节点获取地址:(.*?html)', latest_video['description'])
    print(v_url)
    if not v_url:
        # Fix: the original indexed v_url[-1] unconditionally and raised
        # IndexError when the description carried no matching link.
        return None
    return v_url[-1]
def run():
    """Entry point: resolve the newest subscription link and download it."""
    print('开始执行', datetime.datetime.now())
    page = get_new_url()
    if page is None:
        return  # nothing to fetch — no video or no link in the description
    subscription = get_v2ray_url(page)
    # gdown performs the actual download to the local subscription file.
    gdown.download(subscription, "下载位置/index.txt", quiet=False)
run()

有关使用上的问题,欢迎您在底部评论区留言,一起交流~
读者评论
评论会同步写入该文在 Notion 中的页面底部(与正文同页,便于管理)。
暂无评论,欢迎抢沙发。