Compare commits
36 Commits
| SHA1 |
|---|
| 112bf9a52c |
| 3d05902824 |
| 3adc110298 |
| ba33d5743f |
| bd9f590b0a |
| e3f84f4c17 |
| 4a7ad2ec75 |
| a285777217 |
| 44cc5d5677 |
| 74a22be16c |
| 972a37b497 |
| 5fc6f7ab6f |
| d389a61f09 |
| 8dca51b897 |
| 97ee25b65d |
| 208ce8a4f9 |
| 44f28e593a |
| d7d3e1ca65 |
| 0c04bb1355 |
| d232d1cf12 |
| 4df21fd258 |
| 08bae46742 |
| a9b3f6b972 |
| afa2115b0d |
| e06e292b1f |
| 90c6dd3d79 |
| 596e409889 |
| 31c26b00fb |
| 8689738f4f |
| 387545ab78 |
| 98fdd61673 |
| 3f802a0ed3 |
| 33bb588c36 |
| cc4b04ede2 |
| 3abe4419d6 |
| d2d9ad1db7 |
**.github/workflows/build-dev.yml** (vendored, 4 changes)

```
@@ -46,7 +46,7 @@ jobs:
goarch: [amd64, arm64]
env:
OUTPUT_BINARY: ghproxy
GO_VERSION: 1.24
GO_VERSION: 1.25

steps:
- uses: actions/checkout@v4
@@ -73,7 +73,7 @@ jobs:
GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goarch }}
run: |
CGO_ENABLED=0 go build -ldflags "-X main.version=${{ env.VERSION }} -X main.dev=true" -o ${{ env.OUTPUT_BINARY }}-${{matrix.goos}}-${{matrix.goarch}} ./main.go
CGO_ENABLED=0 go build -ldflags "-X main.version=${{ env.VERSION }} -X main.dev=true" -o ${{ env.OUTPUT_BINARY }}-${{matrix.goos}}-${{matrix.goarch}} .
- name: 打包
run: |
mkdir ghproxyd
```
**.github/workflows/build.yml** (vendored, 4 changes)

```
@@ -47,7 +47,7 @@ jobs:
goarch: [amd64, arm64]
env:
OUTPUT_BINARY: ghproxy
GO_VERSION: 1.24
GO_VERSION: 1.25

steps:
- uses: actions/checkout@v3
@@ -74,7 +74,7 @@ jobs:
GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goarch }}
run: |
CGO_ENABLED=0 go build -ldflags "-s -w -X main.version=${{ env.VERSION }}" -o ${{ env.OUTPUT_BINARY }}-${{matrix.goos}}-${{matrix.goarch}} ./main.go
CGO_ENABLED=0 go build -ldflags "-s -w -X main.version=${{ env.VERSION }}" -o ${{ env.OUTPUT_BINARY }}-${{matrix.goos}}-${{matrix.goarch}} .
- name: 打包
run: |
mkdir ghproxyd
```
**.gitignore** (vendored, 2 changes)

```
@@ -1,8 +1,10 @@
demo
demo.toml
demo.wanf
*.log
*.bak
list.json
iplist.json
repos
pages
*_test
```
**CHANGELOG.md** (94 changes)

```
@@ -1,5 +1,99 @@
# Changelog

4.3.5-rc.0 - 2025-09-14
---
- PRE-RELEASE: v4.3.5-rc.0 is a pre-release of v4.3.5; do not use it in production.
- CHANGE: Improved the `nest` implementation, reducing memory allocation from `10371 B/op -> 1852 B/op` and `43 allocs/op -> 14 allocs/op`
- CHANGE: Added a `dispatcher` implementation to `nest`, assigning a suitable handler per case to preserve both performance and compatibility
- CHANGE: Improved memory allocation in the path-matching hot path

4.3.4 - 2025-09-14
---
- CHANGE: Improved the nested-acceleration implementation for better stability

4.3.3 - 2025-09-10
---
- CHANGE: Strengthened support for [wanf](https://github.com/WJQSERVER/wanf)
- CHANGE: Updated dependencies, including the Touka framework

4.3.2 - 2025-08-20
---
- FIX: Corrected handling when `cfg.Pages.StaticDir` is empty

4.3.1 - 2025-08-13
---
- CHANGE: Updated to [Go 1.25](https://tip.golang.org/doc/go1.25)

4.3.0 - 2025-08-11
---
- CHANGE: Added automatic `library` prefixing to the OCI image (Docker) proxy
- CHANGE(refactor): Improved how the OCI image (Docker) proxy composes upstream paths
- ADD: Added support for the [WANF](https://github.com/WJQSERVER/wanf) configuration file format

4.3.0-rc.0 - 2025-08-11
---
- PRE-RELEASE: v4.3.0-rc.0 is a pre-release of v4.3.0; do not use it in production.
- CHANGE: Added automatic `library` prefixing to the OCI image (Docker) proxy
- CHANGE(refactor): Improved how the OCI image (Docker) proxy composes upstream paths
- ADD: Added support for the [WANF](https://github.com/WJQSERVER/wanf) configuration file format

4.2.7 - 2025-08-04
---
- CHANGE: Added special handling in the OCI image (Docker) proxy to keep it usable; see [#159](https://github.com/WJQSERVER-STUDIO/ghproxy/issues/159)
- CHANGE: Updated the Touka framework, which also resolves some excessive-logging issues

4.2.6 - 2025-08-01
---
- CHANGE: Fixed the matcher

4.2.5 - 2025-07-31
---
- CHANGE: Further refined the matcher to handle more cases

4.2.4 - 2025-07-29
---
- CHANGE: Improved the matcher to avoid matching content it should not

4.2.4-rc.0 - 2025-07-29
---
- PRE-RELEASE: v4.2.4-rc.0 is a pre-release of v4.2.4; do not use it in production.
- CHANGE: Improved the matcher to avoid matching content it should not

4.2.3 - 2025-07-27
---
- CHANGE: Improved the error-page loader so that error rendering no longer falls back to JSON output when `external` mode is selected
- CHANGE: Refined the default target logic of the OCI (Docker) image proxy

4.2.3-rc.0 - 2025-07-27
---
- PRE-RELEASE: v4.2.3-rc.0 is a pre-release of v4.2.3; do not use it in production.
- CHANGE: Improved the error-page loader so that error rendering no longer falls back to JSON output when `external` mode is selected
- CHANGE: Refined the default target logic of the OCI (Docker) image proxy

4.2.2 - 2025-07-25
---
- CHANGE: Refactored the OCI image proxy and improved the special-case handling for upstreams such as `ghcr`, `gcr`, and `k8s.gcr`

4.2.2-rc.0 - 2025-07-25
---
- PRE-RELEASE: v4.2.2-rc.0 is a pre-release of v4.2.2; do not use it in production.
- CHANGE: Refactored the OCI image proxy and improved the special-case handling for upstreams such as `ghcr`, `gcr`, and `k8s.gcr`

4.2.1 - 2025-07-25
---
- CHANGE: Updated theme styles: added a new `free` theme and refreshed the `design` and `hub` themes

4.2.0 - 2025-07-22
---
- CHANGE: Added support for IP (CIDR) based allow-listing and blocking
- CHANGE: Further advanced `json/v2` support

4.2.0-rc.0 - 2025-07-22
---
- PRE-RELEASE: v4.2.0-rc.0 is a pre-release of v4.2.0; do not use it in production.
- CHANGE: Added support for IP (CIDR) based allow-listing and blocking
- CHANGE: Deepened the json/v2 migration in preparation for Go 1.25 json/v2

4.1.7 - 2025-07-20
---
- CHANGE: Updated related dependencies
```
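The 4.3.0 entries above mention automatic `library` prefixing for the OCI image (Docker) proxy. As a rough illustration only (not the project's actual code; the real routing is in the `proxy/docker.go` hunks later in this compare), the idea is that a bare image name is rewritten to Docker Hub's `library/<name>` form before the upstream path is built:

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeImage prepends "library/" to single-segment image names,
// mirroring Docker Hub's convention ("nginx" -> "library/nginx").
// Multi-segment names ("wjqserver/ghproxy") are left untouched.
func normalizeImage(name string) string {
	if !strings.Contains(name, "/") {
		return "library/" + name
	}
	return name
}

func main() {
	for _, n := range []string{"nginx", "library/nginx", "wjqserver/ghproxy"} {
		fmt.Printf("%-20s -> %s\n", n, normalizeImage(n))
	}
}
```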
```
@@ -1 +1 @@
4.1.7-rc.0
4.3.5-rc.0
```
````
@@ -36,6 +36,8 @@

[Related articles](https://blog.wjqserver.com/categories/my-program/)

Proxy promotion: [Thordata](https://www.thordata.com/?ls=github&lk=WJQserver), one of the most cost-effective proxy providers on the market: cheap and easy to use, with 60 million IPs from 195 countries and regions worldwide; rotating residential / native ISP / unlimited plans start at just $0.65/GB, and new users get $1 = 5GB. Contact customer service for a free trial.

### Usage examples

```bash
````
```
@@ -7,7 +7,7 @@ import (
"strings"
"sync"

"encoding/json"
"github.com/go-json-experiment/json"
)

type Blacklist struct {
```
**auth/ipfilter.go** (new file, 60 lines)

```
@@ -0,0 +1,60 @@
package auth

import (
	"fmt"
	"ghproxy/config"
	"os"

	"github.com/go-json-experiment/json"
	"github.com/go-json-experiment/json/jsontext"
)

func ReadIPFilterList(cfg *config.Config) (whitelist []string, blacklist []string, err error) {
	if cfg.IPFilter.IPFilterFile == "" {
		return nil, nil, nil
	}

	// 检查文件是否存在, 不存在则创建空json
	if _, err := os.Stat(cfg.IPFilter.IPFilterFile); os.IsNotExist(err) {
		if err := CreateEmptyIPFilterFile(cfg.IPFilter.IPFilterFile); err != nil {
			return nil, nil, fmt.Errorf("failed to create empty IP filter file: %w", err)
		}
	}

	data, err := os.ReadFile(cfg.IPFilter.IPFilterFile)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to read IP filter file: %w", err)
	}

	var ipFilterData struct {
		AllowList []string `json:"allow"`
		BlockList []string `json:"block"`
	}
	if err := json.Unmarshal(data, &ipFilterData); err != nil {
		return nil, nil, fmt.Errorf("invalid IP filter file format: %w", err)
	}

	return ipFilterData.AllowList, ipFilterData.BlockList, nil
}

// 创建空列表json
func CreateEmptyIPFilterFile(filePath string) error {
	emptyData := struct {
		AllowList []string `json:"allow"`
		BlockList []string `json:"block"`
	}{
		AllowList: []string{},
		BlockList: []string{},
	}

	jsonData, err := json.Marshal(emptyData, jsontext.Multiline(true), jsontext.WithIndent(" "))
	if err != nil {
		return fmt.Errorf("failed to marshal empty IP filter data: %w", err)
	}

	err = os.WriteFile(filePath, jsonData, 0644)
	if err != nil {
		return fmt.Errorf("failed to write empty IP filter file: %w", err)
	}
	return nil
}
```
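A minimal usage sketch of the new loader, assuming only what the file above shows (the package import paths follow the `ghproxy` module name from go.mod; the filter file path is illustrative). If the file does not exist yet, `ReadIPFilterList` first writes an empty `{"allow": [], "block": []}` document and then returns two empty slices:

```go
package main

import (
	"fmt"

	"ghproxy/auth"
	"ghproxy/config"
)

func main() {
	// Hypothetical config pointing at a filter file path.
	cfg := &config.Config{}
	cfg.IPFilter.IPFilterFile = "/tmp/ipfilter.json"

	allow, block, err := auth.ReadIPFilterList(cfg)
	if err != nil {
		fmt.Println("read ip filter list:", err)
		return
	}
	fmt.Println("allow entries:", allow)
	fmt.Println("block entries:", block)
}
```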
```
@@ -1,12 +1,13 @@
package auth

import (
"encoding/json"
"fmt"
"ghproxy/config"
"os"
"strings"
"sync"

"github.com/go-json-experiment/json"
)

// Whitelist 用于存储白名单信息
```
**config/config.go** (219 changes)

```
@@ -1,24 +1,31 @@
package config

import (
"fmt"
"os"
"path/filepath"
"strings"

"github.com/BurntSushi/toml"

"github.com/WJQSERVER/wanf"
)

// Config 结构体定义了整个应用程序的配置
type Config struct {
Server ServerConfig
Httpc HttpcConfig
GitClone GitCloneConfig
Shell ShellConfig
Pages PagesConfig
Log LogConfig
Auth AuthConfig
Blacklist BlacklistConfig
Whitelist WhitelistConfig
RateLimit RateLimitConfig
Outbound OutboundConfig
Docker DockerConfig
Server ServerConfig `toml:"server" wanf:"server"`
Httpc HttpcConfig `toml:"httpc" wanf:"httpc"`
GitClone GitCloneConfig `toml:"gitclone" wanf:"gitclone"`
Shell ShellConfig `toml:"shell" wanf:"shell"`
Pages PagesConfig `toml:"pages" wanf:"pages"`
Log LogConfig `toml:"log" wanf:"log"`
Auth AuthConfig `toml:"auth" wanf:"auth"`
Blacklist BlacklistConfig `toml:"blacklist" wanf:"blacklist"`
Whitelist WhitelistConfig `toml:"whitelist" wanf:"whitelist"`
IPFilter IPFilterConfig `toml:"ipFilter" wanf:"ipFilter"`
RateLimit RateLimitConfig `toml:"rateLimit" wanf:"rateLimit"`
Outbound OutboundConfig `toml:"outbound" wanf:"outbound"`
Docker DockerConfig `toml:"docker" wanf:"docker"`
}

/*
@@ -31,13 +38,14 @@ cors = "*" # "*"/"" -> "*" ; "nil" -> "" ;
debug = false
*/

// ServerConfig 定义服务器相关的配置
type ServerConfig struct {
Port int `toml:"port"`
Host string `toml:"host"`
SizeLimit int `toml:"sizeLimit"`
MemLimit int64 `toml:"memLimit"`
Cors string `toml:"cors"`
Debug bool `toml:"debug"`
Port int `toml:"port" wanf:"port"`
Host string `toml:"host" wanf:"host"`
SizeLimit int `toml:"sizeLimit" wanf:"sizeLimit"`
MemLimit int64 `toml:"memLimit" wanf:"memLimit"`
Cors string `toml:"cors" wanf:"cors"`
Debug bool `toml:"debug" wanf:"debug"`
}

/*
@@ -48,12 +56,13 @@ maxIdleConnsPerHost = 60 # only for advanced mode
maxConnsPerHost = 0 # only for advanced mode
useCustomRawHeaders = false
*/
// HttpcConfig 定义 HTTP 客户端相关的配置
type HttpcConfig struct {
Mode string `toml:"mode"`
MaxIdleConns int `toml:"maxIdleConns"`
MaxIdleConnsPerHost int `toml:"maxIdleConnsPerHost"`
MaxConnsPerHost int `toml:"maxConnsPerHost"`
UseCustomRawHeaders bool `toml:"useCustomRawHeaders"`
Mode string `toml:"mode" wanf:"mode"`
MaxIdleConns int `toml:"maxIdleConns" wanf:"maxIdleConns"`
MaxIdleConnsPerHost int `toml:"maxIdleConnsPerHost" wanf:"maxIdleConnsPerHost"`
MaxConnsPerHost int `toml:"maxConnsPerHost" wanf:"maxConnsPerHost"`
UseCustomRawHeaders bool `toml:"useCustomRawHeaders" wanf:"useCustomRawHeaders"`
}

/*
@@ -63,11 +72,12 @@ smartGitAddr = "http://127.0.0.1:8080"
//cacheTimeout = 10
ForceH2C = true
*/
// GitCloneConfig 定义 Git 克隆相关的配置
type GitCloneConfig struct {
Mode string `toml:"mode"`
SmartGitAddr string `toml:"smartGitAddr"`
Mode string `toml:"mode" wanf:"mode"`
SmartGitAddr string `toml:"smartGitAddr" wanf:"smartGitAddr"`
//CacheTimeout int `toml:"cacheTimeout"`
ForceH2C bool `toml:"ForceH2C"`
ForceH2C bool `toml:"ForceH2C" wanf:"ForceH2C"`
}

/*
@@ -75,9 +85,10 @@ type GitCloneConfig struct {
editor = true
rewriteAPI = false
*/
// ShellConfig 定义 Shell 相关的配置
type ShellConfig struct {
Editor bool `toml:"editor"`
RewriteAPI bool `toml:"rewriteAPI"`
Editor bool `toml:"editor" wanf:"editor"`
RewriteAPI bool `toml:"rewriteAPI" wanf:"rewriteAPI"`
}

/*
@@ -86,16 +97,18 @@ mode = "internal" # "internal" or "external"
theme = "bootstrap" # "bootstrap" or "nebula"
staticDir = "/data/www"
*/
// PagesConfig 定义静态页面相关的配置
type PagesConfig struct {
Mode string `toml:"mode"`
Theme string `toml:"theme"`
StaticDir string `toml:"staticDir"`
Mode string `toml:"mode" wanf:"mode"`
Theme string `toml:"theme" wanf:"theme"`
StaticDir string `toml:"staticDir" wanf:"staticDir"`
}

// LogConfig 定义日志相关的配置
type LogConfig struct {
LogFilePath string `toml:"logFilePath"`
MaxLogSize int64 `toml:"maxLogSize"`
Level string `toml:"level"`
LogFilePath string `toml:"logFilePath" wanf:"logFilePath"`
MaxLogSize int64 `toml:"maxLogSize" wanf:"maxLogSize"`
Level string `toml:"level" wanf:"level"`
}

/*
@@ -108,24 +121,35 @@ passThrough = false
ForceAllowApi = false
ForceAllowApiPassList = false
*/
// AuthConfig 定义认证相关的配置
type AuthConfig struct {
Enabled bool `toml:"enabled"`
Method string `toml:"method"`
Key string `toml:"key"`
Token string `toml:"token"`
PassThrough bool `toml:"passThrough"`
ForceAllowApi bool `toml:"ForceAllowApi"`
ForceAllowApiPassList bool `toml:"ForceAllowApiPassList"`
Enabled bool `toml:"enabled" wanf:"enabled"`
Method string `toml:"method" wanf:"method"`
Key string `toml:"key" wanf:"key"`
Token string `toml:"token" wanf:"token"`
PassThrough bool `toml:"passThrough" wanf:"passThrough"`
ForceAllowApi bool `toml:"ForceAllowApi" wanf:"ForceAllowApi"`
ForceAllowApiPassList bool `toml:"ForceAllowApiPassList" wanf:"ForceAllowApiPassList"`
}

// BlacklistConfig 定义黑名单相关的配置
type BlacklistConfig struct {
Enabled bool `toml:"enabled"`
BlacklistFile string `toml:"blacklistFile"`
Enabled bool `toml:"enabled" wanf:"enabled"`
BlacklistFile string `toml:"blacklistFile" wanf:"blacklistFile"`
}

// WhitelistConfig 定义白名单相关的配置
type WhitelistConfig struct {
Enabled bool `toml:"enabled"`
WhitelistFile string `toml:"whitelistFile"`
Enabled bool `toml:"enabled" wanf:"enabled"`
WhitelistFile string `toml:"whitelistFile" wanf:"whitelistFile"`
}

// IPFilterConfig 定义 IP 过滤相关的配置
type IPFilterConfig struct {
Enabled bool `toml:"enabled" wanf:"enabled"`
EnableAllowList bool `toml:"enableAllowList" wanf:"enableAllowList"`
EnableBlockList bool `toml:"enableBlockList" wanf:"enableBlockList"`
IPFilterFile string `toml:"ipFilterFile" wanf:"ipFilterFile"`
}

/*
@@ -142,19 +166,21 @@ burst = 10
singleBurst = "10mbps"
*/

// RateLimitConfig 定义限速相关的配置
type RateLimitConfig struct {
Enabled bool `toml:"enabled"`
RatePerMinute int `toml:"ratePerMinute"`
Burst int `toml:"burst"`
BandwidthLimit BandwidthLimitConfig
Enabled bool `toml:"enabled" wanf:"enabled"`
RatePerMinute int `toml:"ratePerMinute" wanf:"ratePerMinute"`
Burst int `toml:"burst" wanf:"burst"`
BandwidthLimit BandwidthLimitConfig `toml:"bandwidthLimit" wanf:"bandwidthLimit"`
}

// BandwidthLimitConfig 定义带宽限制相关的配置
type BandwidthLimitConfig struct {
Enabled bool `toml:"enabled"`
TotalLimit string `toml:"totalLimit"`
TotalBurst string `toml:"totalBurst"`
SingleLimit string `toml:"singleLimit"`
SingleBurst string `toml:"singleBurst"`
Enabled bool `toml:"enabled" wanf:"enabled"`
TotalLimit string `toml:"totalLimit" wanf:"totalLimit"`
TotalBurst string `toml:"totalBurst" wanf:"totalBurst"`
SingleLimit string `toml:"singleLimit" wanf:"singleLimit"`
SingleBurst string `toml:"singleBurst" wanf:"singleBurst"`
}

/*
@@ -162,9 +188,10 @@ type BandwidthLimitConfig struct {
enabled = false
url = "socks5://127.0.0.1:1080" # "http://127.0.0.1:7890"
*/
// OutboundConfig 定义出站代理相关的配置
type OutboundConfig struct {
Enabled bool `toml:"enabled"`
Url string `toml:"url"`
Enabled bool `toml:"enabled" wanf:"enabled"`
Url string `toml:"url" wanf:"url"`
}

/*
@@ -176,17 +203,19 @@ auth = false
user1 = "testpass"
test = "test123"
*/
// DockerConfig 定义 Docker 相关的配置
type DockerConfig struct {
Enabled bool `toml:"enabled"`
Target string `toml:"target"`
Auth bool `toml:"auth"`
Credentials map[string]string `toml:"credentials"`
AuthPassThrough bool `toml:"authPassThrough"`
Enabled bool `toml:"enabled" wanf:"enabled"`
Target string `toml:"target" wanf:"target"`
Auth bool `toml:"auth" wanf:"auth"`
Credentials map[string]string `toml:"credentials" wanf:"credentials"`
AuthPassThrough bool `toml:"authPassThrough" wanf:"authPassThrough"`
}

// LoadConfig 从 TOML 配置文件加载配置
// LoadConfig 从配置文件加载配置
func LoadConfig(filePath string) (*Config, error) {
if !FileExists(filePath) {
exist, filePath2read := FileExists(filePath)
if !exist {
// 楔入配置文件
err := DefaultConfig().WriteConfig(filePath)
if err != nil {
@@ -194,15 +223,22 @@ func LoadConfig(filePath string) (*Config, error) {
}
return DefaultConfig(), nil
}

var config Config
if _, err := toml.DecodeFile(filePath, &config); err != nil {
ext := filepath.Ext(filePath2read)
if ext == ".wanf" {
if err := wanf.DecodeFile(filePath2read, &config); err != nil {
return nil, err
}
return &config, nil
}

if _, err := toml.DecodeFile(filePath2read, &config); err != nil {
return nil, err
}
return &config, nil
}

// 写入配置文件
// WriteConfig 写入配置文件
func (c *Config) WriteConfig(filePath string) error {
file, err := os.Create(filePath)
if err != nil {
@@ -210,17 +246,54 @@ func (c *Config) WriteConfig(filePath string) error {
}
defer file.Close()

ext := filepath.Ext(filePath)
if ext == ".wanf" {
err := wanf.NewStreamEncoder(file).Encode(c)
if err != nil {
return err
}
return nil
}

encoder := toml.NewEncoder(file)
return encoder.Encode(c)
}

// 检测文件是否存在
func FileExists(filename string) bool {
// FileExists 检测文件是否存在
func FileExists(filename string) (bool, string) {
_, err := os.Stat(filename)
return !os.IsNotExist(err)
if err == nil {
return true, filename
}
if os.IsNotExist(err) {
// 获取文件名(不包含路径)
base := filepath.Base(filename)
dir := filepath.Dir(filename)

// 获取扩展名
fileNameBody := strings.TrimSuffix(base, filepath.Ext(base))

// 重新组合路径, 扩展名改为.wanf, 确认是否存在
wanfFilename := filepath.Join(dir, fileNameBody+".wanf")

_, err = os.Stat(wanfFilename)
if err == nil {
// .wanf 文件存在
fmt.Printf("\n Found .wanf file: %s\n", wanfFilename)
return true, wanfFilename
} else if os.IsNotExist(err) {
// .wanf 文件不存在
return false, ""
} else {
// 其他错误
return false, ""
}
} else {
return false, filename
}
}

// 默认配置结构体
// DefaultConfig 返回默认配置结构体
func DefaultConfig() *Config {
return &Config{
Server: ServerConfig{
@@ -273,6 +346,12 @@ func DefaultConfig() *Config {
Enabled: false,
WhitelistFile: "/data/ghproxy/config/whitelist.json",
},
IPFilter: IPFilterConfig{
Enabled: false,
IPFilterFile: "/data/ghproxy/config/ipfilter.json",
EnableAllowList: false,
EnableBlockList: false,
},
RateLimit: RateLimitConfig{
Enabled: false,
RatePerMinute: 100,
```
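The reworked `FileExists`/`LoadConfig` pair above dispatches on the file extension: a `.wanf` path is decoded with `wanf.DecodeFile`, anything else with `toml.DecodeFile`, and when the requested file is missing the lookup falls back to a sibling `.wanf` file with the same base name. A minimal caller-side sketch of that behaviour, assuming only the signatures shown in this diff (the paths are illustrative, not the project's documented defaults):

```go
package main

import (
	"fmt"

	"ghproxy/config"
)

func main() {
	// If config.toml is absent but config.wanf sits next to it, FileExists
	// reports the .wanf path, and LoadConfig decodes it with the wanf decoder
	// instead of the TOML one.
	exist, pathToRead := config.FileExists("/data/ghproxy/config/config.toml")
	fmt.Println("found:", exist, "reading:", pathToRead)

	cfg, err := config.LoadConfig("/data/ghproxy/config/config.toml")
	if err != nil {
		fmt.Println("load config:", err)
		return
	}
	fmt.Println("listen port:", cfg.Server.Port)
}
```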
```
@@ -49,6 +49,12 @@ enabled = false
enabled = false
whitelistFile = "/data/ghproxy/config/whitelist.json"

[ipFilter]
enabled = false
enableAllowList = false
enableBlockList = false
ipFilterFile = "/data/ghproxy/config/ipfilter.json"

[rateLimit]
enabled = false
ratePerMinute = 180
```
**config/ipfilter.json** (new file, 11 lines)

```
@@ -0,0 +1,11 @@
{
  "allow": [
    "127.0.0.1",
    "192.168.1.0/24",
    "::1"
  ],
  "block": [
    "10.0.0.0/8",
    "192.168.1.0/24"
  ]
}
```
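The `allow`/`block` lists above mix single addresses and CIDR prefixes. The actual enforcement is done by the `fenthope/ipfilter` middleware wired up in the main.go hunks below, whose internals are not part of this diff; the standalone sketch here only illustrates how entries of this shape can be matched against a client IP with the standard library:

```go
package main

import (
	"fmt"
	"net/netip"
	"strings"
)

// matches reports whether ip falls under any entry; entries may be single
// addresses ("127.0.0.1", "::1") or CIDR prefixes ("192.168.1.0/24").
func matches(entries []string, ip netip.Addr) bool {
	for _, e := range entries {
		if strings.Contains(e, "/") {
			if p, err := netip.ParsePrefix(e); err == nil && p.Contains(ip) {
				return true
			}
		} else if a, err := netip.ParseAddr(e); err == nil && a == ip {
			return true
		}
	}
	return false
}

func main() {
	allow := []string{"127.0.0.1", "192.168.1.0/24", "::1"}
	ip := netip.MustParseAddr("192.168.1.42")
	fmt.Println("allowed:", matches(allow, ip)) // true
}
```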
**go.mod** (24 changes)

```
@@ -1,27 +1,27 @@
module ghproxy

go 1.24.5
go 1.25.1

require (
github.com/BurntSushi/toml v1.5.0
github.com/WJQSERVER-STUDIO/httpc v0.8.1
golang.org/x/net v0.42.0
golang.org/x/time v0.12.0
github.com/WJQSERVER-STUDIO/httpc v0.8.2
golang.org/x/net v0.44.0
golang.org/x/time v0.13.0
)

require (
github.com/WJQSERVER-STUDIO/go-utils/iox v0.0.2
github.com/WJQSERVER-STUDIO/go-utils/limitreader v0.0.2
github.com/WJQSERVER/wanf v0.0.0-20250810023226-e51d9d0737ee
github.com/fenthope/bauth v0.0.1
github.com/fenthope/ikumi v0.0.2
github.com/fenthope/reco v0.0.3
github.com/fenthope/record v0.0.3
github.com/fenthope/ipfilter v0.0.1
github.com/fenthope/reco v0.0.4
github.com/fenthope/record v0.0.4
github.com/go-json-experiment/json v0.0.0-20250910080747-cc2cfa0554c3
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/infinite-iroha/touka v0.3.1
github.com/infinite-iroha/touka v0.3.7
github.com/wjqserver/modembed v0.0.1
)

require (
github.com/WJQSERVER-STUDIO/go-utils/copyb v0.0.6 // indirect
github.com/go-json-experiment/json v0.0.0-20250714165856-be8212f5270d // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
)
require github.com/valyala/bytebufferpool v1.0.0 // indirect
```
**go.sum** (38 changes)

```
@@ -1,30 +1,36 @@
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/WJQSERVER-STUDIO/go-utils/copyb v0.0.6 h1:/50VJYXd6jcu+p5BnEBDyiX0nAyGxas1W3DCnrYMxMY=
github.com/WJQSERVER-STUDIO/go-utils/copyb v0.0.6/go.mod h1:FZ6XE+4TKy4MOfX1xWKe6Rwsg0ucYFCdNh1KLvyKTfc=
github.com/WJQSERVER-STUDIO/go-utils/iox v0.0.2 h1:AiIHXP21LpK7pFfqUlUstgQEWzjbekZgxOuvVwiMfyM=
github.com/WJQSERVER-STUDIO/go-utils/iox v0.0.2/go.mod h1:mCLqYU32bTmEE6dpj37MKKiZgz70Jh/xyK9vVbq6pok=
github.com/WJQSERVER-STUDIO/go-utils/limitreader v0.0.2 h1:8bBkKk6E2Zr+I5szL7gyc5f0DK8N9agIJCpM1Cqw2NE=
github.com/WJQSERVER-STUDIO/go-utils/limitreader v0.0.2/go.mod h1:yPX8xuZH+py7eLJwOYj3VVI/4/Yuy5+x8Mhq8qezcPg=
github.com/WJQSERVER-STUDIO/httpc v0.8.1 h1:/eG8aYKL3WfQILIRbG+cbzQjPkNHEPTqfGUdQS5rtI4=
github.com/WJQSERVER-STUDIO/httpc v0.8.1/go.mod h1:mxXBf2hqbQGNHkVy/7wfU7Xi2s09MyZpbY2hyR+4uD4=
github.com/WJQSERVER-STUDIO/httpc v0.8.2 h1:PFPLodV0QAfGEP6915J57vIqoKu9cGuuiXG/7C9TNUk=
github.com/WJQSERVER-STUDIO/httpc v0.8.2/go.mod h1:8WhHVRO+olDFBSvL5PC/bdMkb6U3vRdPJ4p4pnguV5Y=
github.com/WJQSERVER/wanf v0.0.0-20250810023226-e51d9d0737ee h1:tJ31DNBn6UhWkk8fiikAQWqULODM+yBcGAEar1tzdZc=
github.com/WJQSERVER/wanf v0.0.0-20250810023226-e51d9d0737ee/go.mod h1:q2Pyg+G+s1acMWxrbI4CwS/Yk76/BzLREEdZ8iFwUNE=
github.com/fenthope/bauth v0.0.1 h1:+4UIQshGx3mYD4L3f2S4MLZOi5PWU7fU5GK3wsZvwzE=
github.com/fenthope/bauth v0.0.1/go.mod h1:1fveTpgfR1p+WXQ8MXm9BfBCeNYi55j23jxCOGOvBSA=
github.com/fenthope/ikumi v0.0.2 h1:5oaSTf/Msp7M2O3o/X20omKWEQbFhX4KV0CVF21oCdk=
github.com/fenthope/ikumi v0.0.2/go.mod h1:IYbxzOGndZv/yRrbVMyV6dxh06X2wXCbfxrTRM1IruU=
github.com/fenthope/reco v0.0.3 h1:RmnQ0D9a8PWtwOODawitTe4BztTnS9wYwrDbipISNq4=
github.com/fenthope/reco v0.0.3/go.mod h1:mDkGLHte5udWTIcjQTxrABRcf56SSdxBOCLgrRDwI/Y=
github.com/fenthope/record v0.0.3 h1:v5urgs5LAkLMlljAT/MjW8fWuRHXPnAraTem5ui7rm4=
github.com/fenthope/record v0.0.3/go.mod h1:KFEkSc4TDZ3QIhP/wglD32uYVA6X1OUcripiao1DEE4=
github.com/go-json-experiment/json v0.0.0-20250714165856-be8212f5270d h1:+d6m5Bjvv0/RJct1VcOw2P5bvBOGjENmxORJYnSYDow=
github.com/go-json-experiment/json v0.0.0-20250714165856-be8212f5270d/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M=
github.com/fenthope/ipfilter v0.0.1 h1:HrYAyixCMvsDAz36GRyFfyCNtrgYwzrhMcY0XV7fGcM=
github.com/fenthope/ipfilter v0.0.1/go.mod h1:QfY0GrpG0D82HROgdH4c9eog4js42ghLIfl/iM4MvvY=
github.com/fenthope/reco v0.0.4 h1:yo2g3aWwdoMpaZWZX4SdZOW7mCK82RQIU/YI8ZUQThM=
github.com/fenthope/reco v0.0.4/go.mod h1:eMyS8HpdMVdJ/2WJt6Cvt8P1EH9Igzj5lSJrgc+0jeg=
github.com/fenthope/record v0.0.4 h1:/1JHNCxiXGLL/qCh4LEGaAvhj4CcKsb6siTxjLmjdO4=
github.com/fenthope/record v0.0.4/go.mod h1:G0a6KCiCDyX2SsC3nfzSN651fJKxH482AyJvzlnvAJU=
github.com/go-json-experiment/json v0.0.0-20250813233538-9b1f9ea2e11b h1:6Q4zRHXS/YLOl9Ng1b1OOOBWMidAQZR3Gel0UKPC/KU=
github.com/go-json-experiment/json v0.0.0-20250813233538-9b1f9ea2e11b/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M=
github.com/go-json-experiment/json v0.0.0-20250910080747-cc2cfa0554c3 h1:02WINGfSX5w0Mn+F28UyRoSt9uvMhKguwWMlOAh6U/0=
github.com/go-json-experiment/json v0.0.0-20250910080747-cc2cfa0554c3/go.mod h1:uNVvRXArCGbZ508SxYYTC5v1JWoz2voff5pm25jU1Ok=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/infinite-iroha/touka v0.3.1 h1:djR9hg5MbVpT1dIz2GWo4MZ/kx3l6bJ4nrpzpvdi3uk=
github.com/infinite-iroha/touka v0.3.1/go.mod h1:pHOYHE4AKoQ1KikHF9JYKIJ4he8um1MzgcddscjCeyg=
github.com/infinite-iroha/touka v0.3.7 h1:bIIZW5Weh7lVpyOWh4FmyR9UOfb5FOt+cR9yQ30FJLA=
github.com/infinite-iroha/touka v0.3.7/go.mod h1:uwkF1gTrNEgQ4P/Gwtk6WLbERehq3lzB8x1FMedyrfE=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/wjqserver/modembed v0.0.1 h1:8ZDz7t9M5DLrUFlYgBUUmrMzxWsZPmHvOazkr/T2jEs=
github.com/wjqserver/modembed v0.0.1/go.mod h1:sYbQJMAjSBsdYQrUsuHY380XXE1CuRh8g9yyCztTXOQ=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
```
**main.go** (199 changes)

```
@@ -8,6 +8,7 @@ import (
"net/http"
"os"
"runtime/debug"
"strings"
"time"

"ghproxy/api"
@@ -21,6 +22,7 @@ import (
"ghproxy/weakcache"

"github.com/fenthope/ikumi"
"github.com/fenthope/ipfilter"
"github.com/fenthope/reco"
"github.com/fenthope/record"
"github.com/infinite-iroha/touka"
@@ -52,12 +54,21 @@ var (
)

var (
logger *reco.Logger
logDump = logger.Debugf
logDebug = logger.Debugf
logInfo = logger.Infof
logWarning = logger.Warnf
logError = logger.Errorf
// supportedThemes 定义了所有支持的主题, 用于验证配置和动态加载
supportedThemes = map[string]struct{}{
"bootstrap": {},
"nebula": {},
"design": {},
"metro": {},
"classic": {},
"mino": {},
"hub": {},
"free": {},
}
)

var (
logger *reco.Logger
)

func readFlag() {
@@ -110,7 +121,7 @@ func loadConfig() {
cfg, err = config.LoadConfig(cfgfile)
if err != nil {
fmt.Printf("Failed to load config: %v\n", err)
// 如果配置文件加载失败,也显示帮助信息并退出
// 如果配置文件加载失败, 也显示帮助信息并退出
flag.Usage()
os.Exit(1)
}
@@ -149,7 +160,7 @@ func setupLogger(cfg *config.Config) {
func setMemLimit(cfg *config.Config) {
if cfg.Server.MemLimit > 0 {
debug.SetMemoryLimit((cfg.Server.MemLimit) * 1024 * 1024)
logInfo("Set Memory Limit to %d MB", cfg.Server.MemLimit)
logger.Infof("Set Memory Limit to %d MB", cfg.Server.MemLimit)
}
}

@@ -174,74 +185,76 @@ func InitReq(cfg *config.Config) {
}
}

// loadEmbeddedPages 加载嵌入式页面资源
// initializeErrorPages 初始化嵌入的错误页面资源
// 无论页面模式(internal/external)如何, 都应执行此操作, 以确保统一的错误页面处理
func initializeErrorPages() {
pageFS := modembed.NewModTimeFS(pagesFS, time.Now())
if err := proxy.InitErrPagesFS(pageFS); err != nil {
// 这是一个警告而不是致命错误, 因为即使没有自定义错误页面, 服务器也能运行
logger.Warnf("failed to initialize embedded error pages: %v", err)
}
}

// loadEmbeddedPages 使用 map 替代 switch, 动态加载嵌入式页面和资源文件系统
func loadEmbeddedPages(cfg *config.Config) (fs.FS, fs.FS, error) {
pageFS := modembed.NewModTimeFS(pagesFS, time.Now())
var pages fs.FS
var err error
switch cfg.Pages.Theme {
case "bootstrap":
pages, err = fs.Sub(pageFS, "pages/bootstrap")
case "nebula":
pages, err = fs.Sub(pageFS, "pages/nebula")
case "design":
pages, err = fs.Sub(pageFS, "pages/design")
case "metro":
pages, err = fs.Sub(pageFS, "pages/metro")
case "classic":
pages, err = fs.Sub(pageFS, "pages/classic")
case "mino":
pages, err = fs.Sub(pageFS, "pages/mino")
case "hub":
pages, err = fs.Sub(pageFS, "pages/hub")
default:
pages, err = fs.Sub(pageFS, "pages/design") // 默认主题
logWarning("Invalid Pages Theme: %s, using default theme 'design'", cfg.Pages.Theme)
theme := cfg.Pages.Theme

// 检查主题是否受支持, 如果不支持则使用默认主题
if _, ok := supportedThemes[theme]; !ok {
logger.Warnf("Invalid Pages Theme: %s, using default theme 'design'", theme)
theme = "design" // 默认主题
}

// 从嵌入式文件系统中获取主题子目录
themePath := fmt.Sprintf("pages/%s", theme)
pages, err := fs.Sub(pageFS, themePath)
if err != nil {
return nil, nil, fmt.Errorf("failed to load embedded pages: %w", err)
return nil, nil, fmt.Errorf("failed to load embedded theme '%s': %w", theme, err)
}

// 初始化errPagesFs
errPagesInitErr := proxy.InitErrPagesFS(pageFS)
if errPagesInitErr != nil {
logWarning("errPagesInitErr: %s", errPagesInitErr)
}

var assets fs.FS
assets, err = fs.Sub(pageFS, "pages/assets")
// 加载共享资源文件
assets, err := fs.Sub(pageFS, "pages/assets")
if err != nil {
return nil, nil, fmt.Errorf("failed to load embedded assets: %w", err)
}

return pages, assets, nil
}

// setupPages 设置页面路由
// setupPages 设置页面路由, 增强了错误处理
func setupPages(cfg *config.Config, r *touka.Engine) {
switch cfg.Pages.Mode {
case "internal":
err := setInternalRoute(cfg, r)
if err != nil {
logError("Failed when processing internal pages: %s", err)
fmt.Println(err.Error())
return
logger.Errorf("Failed to set up internal pages, server cannot start: %s", err)
fmt.Printf("Failed to set up internal pages, server cannot start: %s", err)
os.Exit(1)
}

case "external":
r.SetUnMatchFS(http.Dir(cfg.Pages.StaticDir))

if cfg.Pages.StaticDir == "" {
logger.Errorf("Pages Mode is 'external' but StaticDir is empty. Using embedded pages instead.")
err := setInternalRoute(cfg, r)
if err != nil {
logger.Errorf("Failed to load embedded pages: %s", err)
fmt.Printf("Failed to load embedded pages: %s", err)
os.Exit(1)
}
} else {
extPageFS := os.DirFS(cfg.Pages.StaticDir)
r.SetUnMatchFS(http.FS(extPageFS))
}
default:
// 处理无效的Pages Mode
logWarning("Invalid Pages Mode: %s, using default embedded theme", cfg.Pages.Mode)

logger.Warnf("Invalid Pages Mode: %s, using default embedded theme", cfg.Pages.Mode)
err := setInternalRoute(cfg, r)
if err != nil {
logError("Failed when processing internal pages: %s", err)
fmt.Println(err.Error())
return
logger.Errorf("Failed to set up internal pages, server cannot start: %s", err)
fmt.Printf("Failed to set up internal pages, server cannot start: %s", err)
os.Exit(1)
}

}
}

@@ -263,11 +276,9 @@ func viaHeader() func(c *touka.Context) {
}

func setInternalRoute(cfg *config.Config, r *touka.Engine) error {

// 加载嵌入式资源
pages, assets, err := loadEmbeddedPages(cfg)
if err != nil {
logError("Failed when processing pages: %s", err)
return err
}

@@ -285,13 +296,13 @@ func init() {
readFlag()
flag.Parse()

// 如果设置了 -h,则显示帮助信息并退出
// 如果设置了 -h, 则显示帮助信息并退出
if showHelp {
flag.Usage()
os.Exit(0)
}

// 如果设置了 -v,则显示版本号并退出
// 如果设置了 -v, 则显示版本号并退出
if showVersion {
fmt.Printf("GHProxy Version: %s \n", version)
os.Exit(0)
@@ -300,6 +311,7 @@ func init() {
loadConfig()
if cfg != nil { // 在setupLogger前添加空值检查
setupLogger(cfg)
initializeErrorPages()
InitReq(cfg)
setMemLimit(cfg)
loadlist(cfg)
@@ -314,7 +326,7 @@ func init() {
}

if cfg.Server.Debug {
version = "Dev" // 如果是Debug模式,版本设置为"Dev"
version = "Dev" // 如果是Debug模式, 版本设置为"Dev"
}
}
}
@@ -366,13 +378,67 @@ func main() {
Burst: cfg.RateLimit.Burst,
}))
}

if cfg.IPFilter.Enabled {
var err error
ipAllowList, ipBlockList, err := auth.ReadIPFilterList(cfg)
if err != nil {
fmt.Printf("Failed to read IP filter list: %v\n", err)
logger.Errorf("Failed to read IP filter list: %v", err)
os.Exit(1)
}
ipBlockFilter, err := ipfilter.NewIPFilter(ipfilter.IPFilterConfig{
EnableAllowList: cfg.IPFilter.EnableAllowList,
EnableBlockList: cfg.IPFilter.EnableBlockList,
AllowList: ipAllowList,
BlockList: ipBlockList,
})
if err != nil {
fmt.Printf("Failed to initialize IP filter: %v\n", err)
logger.Errorf("Failed to initialize IP filter: %v", err)
os.Exit(1)
} else {
r.Use(ipBlockFilter)
}
}
setupApi(cfg, r, version)
setupPages(cfg, r)
r.SetRedirectTrailingSlash(false)

r.GET("/github.com/:user/:repo/releases/*filepath", func(c *touka.Context) {
c.Set("matcher", "releases")
proxy.RoutingHandler(cfg)(c)
// 规范化路径: 移除前导斜杠, 简化后续处理
filepath := c.Param("filepath")
if len(filepath) > 0 && filepath[0] == '/' {
filepath = filepath[1:]
}

isValidDownload := false

// 检查两种合法的下载链接格式
// 情况 A: "download/..."
if strings.HasPrefix(filepath, "download/") {
isValidDownload = true
} else {
// 情况 B: ":tag/download/..."
slashIndex := strings.IndexByte(filepath, '/')
// 确保 tag 部分存在 (slashIndex > 0)
if slashIndex > 0 {
pathAfterTag := filepath[slashIndex+1:]
if strings.HasPrefix(pathAfterTag, "download/") {
isValidDownload = true
}
}
}

// 根据匹配结果执行最终操作
if isValidDownload {
c.Set("matcher", "releases")
proxy.RoutingHandler(cfg)(c)
} else {
// 任何不符合下载链接格式的 'releases' 路径都被视为浏览页面并拒绝
proxy.ErrorPage(c, proxy.NewErrorWithStatusLookup(400, "unsupported releases page, only download links are allowed"))
return
}
})

r.GET("/github.com/:user/:repo/archive/*filepath", func(c *touka.Context) {
@@ -418,20 +484,11 @@ func main() {
proxy.RoutingHandler(cfg)(c)
})

r.GET("/v2/",
r.ANY("/v2/*path",
r.UseIf(cfg.Docker.Auth, func() touka.HandlerFunc {
return bauth.BasicAuthForStatic(cfg.Docker.Credentials, "GHProxy Docker Proxy")
}),
func(c *touka.Context) {
emptyJSON := "{}"
c.Header("Content-Type", "application/json")
c.Header("Content-Length", fmt.Sprint(len(emptyJSON)))

c.Header("Docker-Distribution-API-Version", "registry/2.0")

c.Status(200)
c.Writer.Write([]byte(emptyJSON))
},
proxy.OciWithImageRouting(cfg),
)

r.GET("/v2", func(c *touka.Context) {
@@ -439,10 +496,6 @@ func main() {
c.Redirect(http.StatusMovedPermanently, "/v2/")
})

r.ANY("/v2/:target/:user/:repo/*filepath", func(c *touka.Context) {
proxy.GhcrWithImageRouting(cfg)(c)
})

r.NoRoute(func(c *touka.Context) {
proxy.NoRouteHandler(cfg)(c)
})
@@ -466,7 +519,7 @@ func main() {
addr := fmt.Sprintf("%s:%d", cfg.Server.Host, cfg.Server.Port)
err := r.RunShutdown(addr)
if err != nil {
logError("Server Run Error: %v", err)
logger.Errorf("Server Run Error: %v", err)
fmt.Printf("Server Run Error: %v\n", err)
}
```
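The new releases route above only proxies real download URLs: either `download/...` directly or `<tag>/download/...`; anything else under `/releases` is rejected as a browse page. The sketch below mirrors that rule as a standalone function so it can be checked against a few example paths (the sample file names are purely illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// isValidDownload mirrors the check in the releases route: accept
// "download/..." or "<tag>/download/..." and reject everything else.
func isValidDownload(filepath string) bool {
	filepath = strings.TrimPrefix(filepath, "/")
	if strings.HasPrefix(filepath, "download/") {
		return true
	}
	if i := strings.IndexByte(filepath, '/'); i > 0 {
		return strings.HasPrefix(filepath[i+1:], "download/")
	}
	return false
}

func main() {
	for _, p := range []string{
		"/download/v1.0.0/ghproxy-linux-amd64.tar.gz", // accepted
		"/v1.0.0/download/ghproxy-linux-amd64.tar.gz", // accepted
		"/tag/v1.0.0",                                 // rejected (browse page)
	} {
		fmt.Printf("%-48s %v\n", p, isValidDownload(p))
	}
}
```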
```
@@ -127,18 +127,14 @@ func ChunkedProxyRequest(ctx context.Context, c *touka.Context, u string, cfg *c
defer bodyReader.Close()

if MatcherShell(u) && matchString(matcher) && cfg.Shell.Editor {
// 判断body是不是gzip
var compress string
if resp.Header.Get("Content-Encoding") == "gzip" {
compress = "gzip"
}

c.Debugf("Use Shell Editor: %s %s %s %s %s", c.ClientIP(), c.Request.Method, u, c.UserAgent(), c.Request.Proto)
c.Header("Content-Length", "")
c.DelHeader("Content-Length")
c.DelHeader("Content-Encoding")

var reader io.Reader

reader, _, err = processLinks(bodyReader, compress, c.Request.Host, cfg, c)
reader, _, err = processLinks(bodyReader, c.Request.Host, cfg, c, bodySize)
c.WriteStream(reader)
if err != nil {
c.Errorf("%s %s %s %s %s Failed to copy response body: %v", c.ClientIP(), c.Request.Method, u, c.UserAgent(), c.Request.Proto, err)
@@ -146,7 +142,6 @@ func ChunkedProxyRequest(ctx context.Context, c *touka.Context, u string, cfg *c
return
}
} else {

if contentLength != "" {
c.SetHeader("Content-Length", contentLength)
c.WriteStream(bodyReader)
```
422
proxy/docker.go
422
proxy/docker.go
@@ -1,20 +1,21 @@
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/infinite-iroha/touka"
|
||||
|
||||
"ghproxy/config"
|
||||
"ghproxy/weakcache"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"ghproxy/config"
|
||||
"ghproxy/weakcache"
|
||||
|
||||
"github.com/WJQSERVER-STUDIO/go-utils/iox"
|
||||
"github.com/WJQSERVER-STUDIO/go-utils/limitreader"
|
||||
"github.com/go-json-experiment/json"
|
||||
"github.com/infinite-iroha/touka"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -22,85 +23,186 @@ var (
|
||||
ghcrTarget = "ghcr.io"
|
||||
)
|
||||
|
||||
// cache 用于存储认证令牌, 避免重复获取
|
||||
var cache *weakcache.Cache[string]
|
||||
|
||||
// imageInfo 结构体用于存储镜像的相关信息
|
||||
type imageInfo struct {
|
||||
User string
|
||||
Repo string
|
||||
Image string
|
||||
}
|
||||
|
||||
// InitWeakCache 初始化弱引用缓存
|
||||
func InitWeakCache() *weakcache.Cache[string] {
|
||||
// 使用默认过期时间和容量为100创建一个新的弱引用缓存
|
||||
cache = weakcache.NewCache[string](weakcache.DefaultExpiration, 100)
|
||||
return cache
|
||||
}
|
||||
|
||||
func GhcrWithImageRouting(cfg *config.Config) touka.HandlerFunc {
|
||||
var (
|
||||
authEndpoint = "/"
|
||||
passTypeMap = map[string]struct{}{
|
||||
"manifests": {},
|
||||
"blobs": {},
|
||||
"tags": {},
|
||||
"index": {},
|
||||
}
|
||||
)
|
||||
|
||||
// 处理路径各种情况
|
||||
func OciWithImageRouting(cfg *config.Config) touka.HandlerFunc {
|
||||
return func(c *touka.Context) {
|
||||
if !cfg.Docker.Enabled {
|
||||
ErrorPage(c, NewErrorWithStatusLookup(403, "Docker proxy is not enabled"))
|
||||
return
|
||||
}
|
||||
var (
|
||||
p1 string
|
||||
p2 string
|
||||
p3 string
|
||||
p4 string
|
||||
target string
|
||||
user string
|
||||
repo string
|
||||
extpath string
|
||||
p1IsTarget bool
|
||||
ignorep3 bool
|
||||
imageNameForAuth string
|
||||
finalreqUrl string
|
||||
iInfo *imageInfo
|
||||
)
|
||||
ociPath := c.Param("path")
|
||||
if ociPath == authEndpoint {
|
||||
emptyJSON := "{}"
|
||||
c.Header("Content-Type", "application/json")
|
||||
c.Header("Content-Length", fmt.Sprint(len(emptyJSON)))
|
||||
|
||||
charToFind := '.'
|
||||
reqTarget := c.Param("target")
|
||||
reqImageUser := c.Param("user")
|
||||
reqImageName := c.Param("repo")
|
||||
reqFilePath := c.Param("filepath")
|
||||
c.Header("Docker-Distribution-API-Version", "registry/2.0")
|
||||
|
||||
path := fmt.Sprintf("%s/%s/%s", reqImageUser, reqImageName, reqFilePath)
|
||||
target := ""
|
||||
c.Status(200)
|
||||
c.Writer.Write([]byte(emptyJSON))
|
||||
return
|
||||
}
|
||||
|
||||
if strings.ContainsRune(reqTarget, charToFind) {
|
||||
switch reqTarget {
|
||||
case "docker.io":
|
||||
// 根据/分割 /:target/:user/:repo/*ext
|
||||
ociPath = ociPath[1:]
|
||||
i := strings.IndexByte(ociPath, '/')
|
||||
if i <= 0 {
|
||||
ErrorPage(c, NewErrorWithStatusLookup(404, "Not Found"))
|
||||
return
|
||||
}
|
||||
p1 = ociPath[:i]
|
||||
|
||||
// 开始判断p1是否为target
|
||||
if strings.Contains(p1, ".") || strings.Contains(p1, ":") {
|
||||
p1IsTarget = true
|
||||
if p1 == "docker.io" {
|
||||
target = dockerhubTarget
|
||||
case "ghcr.io":
|
||||
target = ghcrTarget
|
||||
default:
|
||||
target = reqTarget
|
||||
} else {
|
||||
target = p1
|
||||
}
|
||||
} else {
|
||||
path = c.GetRequestURI()
|
||||
reqImageUser = c.Param("target")
|
||||
reqImageName = c.Param("user")
|
||||
}
|
||||
image := &imageInfo{
|
||||
User: reqImageUser,
|
||||
Repo: reqImageName,
|
||||
Image: fmt.Sprintf("%s/%s", reqImageUser, reqImageName),
|
||||
switch cfg.Docker.Target {
|
||||
case "ghcr":
|
||||
target = ghcrTarget
|
||||
case "dockerhub":
|
||||
target = dockerhubTarget
|
||||
case "":
|
||||
ErrorPage(c, NewErrorWithStatusLookup(500, "Default Docker Target is not configured in config file"))
|
||||
return
|
||||
default:
|
||||
target = cfg.Docker.Target
|
||||
}
|
||||
}
|
||||
|
||||
GhcrToTarget(c, cfg, target, path, image)
|
||||
ociPath = ociPath[i+1:]
|
||||
i = strings.IndexByte(ociPath, '/')
|
||||
if i <= 0 {
|
||||
ErrorPage(c, NewErrorWithStatusLookup(404, "Not Found"))
|
||||
return
|
||||
}
|
||||
p2 = ociPath[:i]
|
||||
ociPath = ociPath[i+1:]
|
||||
|
||||
}
|
||||
// 若p2和passTypeMap匹配
|
||||
if !p1IsTarget {
|
||||
if _, ok := passTypeMap[p2]; ok {
|
||||
ignorep3 = true
|
||||
switch cfg.Docker.Target {
|
||||
case "ghcr":
|
||||
target = ghcrTarget
|
||||
case "dockerhub":
|
||||
target = dockerhubTarget
|
||||
case "":
|
||||
ErrorPage(c, NewErrorWithStatusLookup(500, "Default Docker Target is not configured in config file"))
|
||||
return
|
||||
default:
|
||||
target = cfg.Docker.Target
|
||||
}
|
||||
user = "library"
|
||||
repo = p1
|
||||
extpath = "/" + p2 + "/" + ociPath
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func GhcrToTarget(c *touka.Context, cfg *config.Config, target string, path string, image *imageInfo) {
|
||||
if cfg.Docker.Enabled {
|
||||
var ctx = c.Request.Context()
|
||||
if target != "" {
|
||||
GhcrRequest(ctx, c, "https://"+target+"/v2/"+path+"?"+c.GetReqQueryString(), image, cfg, target)
|
||||
} else {
|
||||
if cfg.Docker.Target == "ghcr" {
|
||||
GhcrRequest(ctx, c, "https://"+ghcrTarget+c.GetRequestURI(), image, cfg, ghcrTarget)
|
||||
} else if cfg.Docker.Target == "dockerhub" {
|
||||
GhcrRequest(ctx, c, "https://"+dockerhubTarget+c.GetRequestURI(), image, cfg, dockerhubTarget)
|
||||
} else if cfg.Docker.Target != "" {
|
||||
// 自定义taget
|
||||
GhcrRequest(ctx, c, "https://"+cfg.Docker.Target+c.GetRequestURI(), image, cfg, cfg.Docker.Target)
|
||||
} else {
|
||||
// 配置为空
|
||||
ErrorPage(c, NewErrorWithStatusLookup(403, "Docker Target is not set"))
|
||||
if !ignorep3 {
|
||||
i = strings.IndexByte(ociPath, '/')
|
||||
if i <= 0 {
|
||||
ErrorPage(c, NewErrorWithStatusLookup(404, "Not Found"))
|
||||
return
|
||||
}
|
||||
p3 = ociPath[:i]
|
||||
|
||||
ociPath = ociPath[i+1:]
|
||||
p4 = ociPath
|
||||
|
||||
if p1IsTarget {
|
||||
if _, ok := passTypeMap[p3]; ok {
|
||||
user = "library"
|
||||
repo = p2
|
||||
extpath = "/" + p3 + "/" + p4
|
||||
} else {
|
||||
user = p2
|
||||
repo = p3
|
||||
extpath = "/" + p4
|
||||
}
|
||||
} else {
|
||||
switch cfg.Docker.Target {
|
||||
case "ghcr":
|
||||
target = ghcrTarget
|
||||
case "dockerhub":
|
||||
target = dockerhubTarget
|
||||
case "":
|
||||
ErrorPage(c, NewErrorWithStatusLookup(500, "Default Docker Target is not configured in config file"))
|
||||
return
|
||||
default:
|
||||
target = cfg.Docker.Target
|
||||
}
|
||||
user = p1
|
||||
repo = p2
|
||||
extpath = "/" + p3 + "/" + p4
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
ErrorPage(c, NewErrorWithStatusLookup(403, "Docker is not Allowed"))
|
||||
return
|
||||
imageNameForAuth = user + "/" + repo
|
||||
finalreqUrl = "https://" + target + "/v2/" + imageNameForAuth + extpath
|
||||
if query := c.GetReqQueryString(); query != "" {
|
||||
finalreqUrl += "?" + query
|
||||
}
|
||||
|
||||
iInfo = &imageInfo{
|
||||
User: user,
|
||||
Repo: repo,
|
||||
Image: imageNameForAuth,
|
||||
}
|
||||
|
||||
GhcrRequest(c.Request.Context(), c, finalreqUrl, iInfo, cfg, target)
|
||||
}
|
||||
}
|
||||
|
||||
// GhcrRequest 执行对Docker注册表的HTTP请求, 处理认证和重定向
|
||||
func GhcrRequest(ctx context.Context, c *touka.Context, u string, image *imageInfo, cfg *config.Config, target string) {
|
||||
|
||||
var (
|
||||
method string
|
||||
req *http.Request
|
||||
@@ -108,23 +210,19 @@ func GhcrRequest(ctx context.Context, c *touka.Context, u string, image *imageIn
|
||||
err error
|
||||
)
|
||||
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
if resp != nil && resp.Body != nil {
|
||||
resp.Body.Close()
|
||||
}
|
||||
if req != nil {
|
||||
req.Body.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
method = c.Request.Method
|
||||
ghcrclient := c.GetHTTPC()
|
||||
bodyByte, err := c.GetReqBodyFull()
|
||||
if err != nil {
|
||||
HandleError(c, fmt.Sprintf("Failed to read request body: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
// 构建初始请求
|
||||
rb := ghcrclient.NewRequestBuilder(method, u)
|
||||
rb.NoDefaultHeaders()
|
||||
rb.SetBody(c.Request.Body)
|
||||
rb.WithContext(ctx)
|
||||
rb.NoDefaultHeaders() // 不使用默认头部, 以便完全控制
|
||||
rb.SetBody(bytes.NewBuffer(bodyByte)) // 设置请求体
|
||||
rb.WithContext(ctx) // 设置请求上下文
|
||||
|
||||
req, err = rb.Build()
|
||||
if err != nil {
|
||||
@@ -132,80 +230,147 @@ func GhcrRequest(ctx context.Context, c *touka.Context, u string, image *imageIn
|
||||
return
|
||||
}
|
||||
|
||||
// 复制客户端请求的头部到代理请求
|
||||
copyHeader(c.Request.Header, req.Header)
|
||||
|
||||
// 确保 Accept 头部被正确设置
|
||||
if acceptHeader, ok := c.Request.Header["Accept"]; ok {
|
||||
req.Header["Accept"] = acceptHeader
|
||||
}
|
||||
|
||||
// 设置 Host 头部为上游目标
|
||||
req.Header.Set("Host", target)
|
||||
if image != nil {
|
||||
|
||||
// 尝试从缓存中获取并使用认证令牌
|
||||
if image != nil && image.Image != "" {
|
||||
token, exist := cache.Get(image.Image)
|
||||
if exist {
|
||||
c.Debugf("Use Cache Token: %s", token)
|
||||
req.Header.Set("Authorization", "Bearer "+token)
|
||||
}
|
||||
}
|
||||
|
||||
// 发送初始请求
|
||||
resp, err = ghcrclient.Do(req)
|
||||
if err != nil {
|
||||
HandleError(c, fmt.Sprintf("Failed to send request: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
switch resp.StatusCode {
|
||||
// 处理 401 Unauthorized 或 404 Not Found 响应, 尝试重新认证并重试
|
||||
if resp.StatusCode == 401 || resp.StatusCode == 404 {
|
||||
// 对于 /v2/ 的请求不进行重试, 因为它通常用于发现认证端点
|
||||
shouldRetry := string(c.GetRequestURIPath()) != "/v2/"
|
||||
originalStatusCode := resp.StatusCode
|
||||
c.Debugf("Initial request failed with status %d. Retry eligibility: %t", originalStatusCode, shouldRetry)
|
||||
|
||||
case 401:
|
||||
// 请求target /v2/路径
|
||||
if string(c.GetRequestURIPath()) != "/v2/" {
|
||||
resp.Body.Close()
|
||||
if image == nil {
|
||||
ErrorPage(c, NewErrorWithStatusLookup(401, "Unauthorized"))
|
||||
if shouldRetry {
|
||||
if image == nil || image.Image == "" {
|
||||
_ = resp.Body.Close() // 终止流程, 关闭当前响应体
|
||||
ErrorPage(c, NewErrorWithStatusLookup(originalStatusCode, "Unauthorized"))
|
||||
return
|
||||
}
|
||||
// 获取新的认证令牌
|
||||
token := ChallengeReq(target, image, ctx, c)
|
||||
|
||||
// 更新kv
|
||||
if token != "" {
|
||||
c.Debugf("Successfully obtained auth token. Retrying request.")
|
||||
_ = resp.Body.Close() // 在发起重试请求前, 关闭旧的响应体
|
||||
|
||||
// 更新kv
|
||||
c.Debugf("Update Cache Token: %s", token)
|
||||
cache.Put(image.Image, token)
|
||||
}
|
||||
|
||||
rb := ghcrclient.NewRequestBuilder(string(method), u)
|
||||
rb.NoDefaultHeaders()
|
||||
rb.SetBody(c.Request.Body)
|
||||
rb.WithContext(ctx)
|
||||
// 重新构建并发送请求
|
||||
rb_retry := ghcrclient.NewRequestBuilder(method, u)
|
||||
rb_retry.NoDefaultHeaders()
|
||||
rb_retry.SetBody(bytes.NewBuffer(bodyByte))
|
||||
rb_retry.WithContext(ctx)
|
||||
|
||||
req, err = rb.Build()
|
||||
if err != nil {
|
||||
HandleError(c, fmt.Sprintf("Failed to create request: %v", err))
|
||||
return
|
||||
}
|
||||
req_retry, err_retry := rb_retry.Build()
|
||||
if err_retry != nil {
|
||||
HandleError(c, fmt.Sprintf("Failed to create retry request: %v", err_retry))
|
||||
return
|
||||
}
|
||||
|
||||
copyHeader(c.Request.Header, req.Header)
|
||||
copyHeader(c.Request.Header, req_retry.Header) // 复制原始头部
|
||||
if acceptHeader, ok := c.Request.Header["Accept"]; ok {
|
||||
req_retry.Header["Accept"] = acceptHeader
|
||||
}
|
||||
|
||||
req.Header.Set("Host", target)
|
||||
if token != "" {
|
||||
req.Header.Set("Authorization", "Bearer "+token)
|
||||
}
|
||||
req_retry.Header.Set("Host", target) // 设置 Host 头部
|
||||
req_retry.Header.Set("Authorization", "Bearer "+token) // 使用新令牌
|
||||
|
||||
resp, err = ghcrclient.Do(req)
|
||||
if err != nil {
|
||||
HandleError(c, fmt.Sprintf("Failed to send request: %v", err))
|
||||
return
|
||||
c.Debugf("Executing retry request. Method: %s, URL: %s", req_retry.Method, req_retry.URL.String())
|
||||
|
||||
resp_retry, err_retry := ghcrclient.Do(req_retry)
|
||||
if err_retry != nil {
|
||||
HandleError(c, fmt.Sprintf("Failed to send retry request: %v", err_retry))
|
||||
return
|
||||
}
|
||||
c.Debugf("Retry request completed with status code: %d", resp_retry.StatusCode)
|
||||
resp = resp_retry // 更新响应为重试后的响应
|
||||
} else {
|
||||
c.Warnf("Failed to obtain auth token. Cannot retry.")
|
||||
// 获取令牌失败, 将继续处理原始的401/404响应, 其响应体仍然打开
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case 404: // 错误处理(404)
|
||||
ErrorPage(c, NewErrorWithStatusLookup(404, "Page Not Found (From Github)"))
|
||||
return
|
||||
case 302, 301:
|
||||
finalURL := resp.Header.Get("Location")
|
||||
if finalURL != "" {
|
||||
err = resp.Body.Close()
|
||||
if err != nil {
|
||||
c.Errorf("Failed to close response body: %v", err)
|
||||
}
|
||||
c.Infof("Internal Redirecting to %s", finalURL)
|
||||
GhcrRequest(ctx, c, finalURL, image, cfg, target)
|
||||
// 透明地处理 302 Found 或 307 Temporary Redirect 重定向
|
||||
if resp.StatusCode == http.StatusFound || resp.StatusCode == http.StatusTemporaryRedirect {
|
||||
location := resp.Header.Get("Location")
|
||||
if location == "" {
|
||||
_ = resp.Body.Close() // 终止流程, 关闭当前响应体
|
||||
HandleError(c, "Redirect response missing Location header")
|
||||
return
|
||||
}
|
||||
|
||||
redirectURL, err := url.Parse(location)
|
||||
if err != nil {
|
||||
_ = resp.Body.Close() // 终止流程, 关闭当前响应体
|
||||
HandleError(c, fmt.Sprintf("Failed to parse redirect location: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
// 如果 Location 是相对路径, 则根据原始请求的 URL 解析为绝对路径
|
||||
if !redirectURL.IsAbs() {
|
||||
originalURL := resp.Request.URL
|
||||
redirectURL = originalURL.ResolveReference(redirectURL)
|
||||
c.Debugf("Resolved relative redirect to absolute URL: %s", redirectURL.String())
|
||||
}
|
||||
|
||||
c.Debugf("Handling redirect. Status: %d, Final Location: %s", resp.StatusCode, redirectURL.String())
|
||||
_ = resp.Body.Close() // 明确关闭重定向响应的响应体, 因为我们将发起新请求
|
||||
|
||||
// 创建并发送重定向请求, 通常使用 GET 方法
|
||||
redirectReq, err := http.NewRequestWithContext(ctx, "GET", redirectURL.String(), nil)
|
||||
if err != nil {
|
||||
HandleError(c, fmt.Sprintf("Failed to create redirect request: %v", err))
|
||||
return
|
||||
}
|
||||
redirectReq.Header.Set("User-Agent", c.Request.UserAgent()) // 复制 User-Agent
|
||||
|
||||
c.Debugf("Executing redirect request to: %s", redirectURL.String())
|
||||
redirectResp, err := ghcrclient.Do(redirectReq)
|
||||
if err != nil {
|
||||
HandleError(c, fmt.Sprintf("Failed to execute redirect request to %s: %v", redirectURL.String(), err))
|
||||
return
|
||||
}
|
||||
c.Debugf("Redirect request to %s completed with status %d", redirectURL.String(), redirectResp.StatusCode)
|
||||
resp = redirectResp // 更新响应为重定向后的响应
|
||||
}
|
||||
|
||||
// 如果最终响应是 404, 则读取响应体并返回自定义错误页面
|
||||
if resp.StatusCode == 404 {
|
||||
defer resp.Body.Close() // 使用defer确保在函数返回前关闭响应体
|
||||
bodyBytes, err := iox.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
c.Warnf("Failed to read upstream 404 response body: %v", err)
|
||||
} else {
|
||||
c.Warnf("Upstream 404 response body: %s", string(bodyBytes))
|
||||
}
|
||||
ErrorPage(c, NewErrorWithStatusLookup(404, "Page Not Found (From Upstream)"))
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -214,6 +379,7 @@ func GhcrRequest(ctx context.Context, c *touka.Context, u string, image *imageIn
|
||||
sizelimit int
|
||||
)
|
||||
|
||||
// 获取配置中的大小限制并转换单位 (MB -> Byte)
|
||||
sizelimit = cfg.Server.SizeLimit * 1024 * 1024
|
||||
contentLength = resp.Header.Get("Content-Length")
|
||||
if contentLength != "" {
|
||||
@@ -221,77 +387,82 @@ func GhcrRequest(ctx context.Context, c *touka.Context, u string, image *imageIn
|
||||
bodySize, err = strconv.Atoi(contentLength)
|
||||
if err != nil {
|
||||
c.Warnf("%s %s %s %s %s Content-Length header is not a valid integer: %v", c.ClientIP(), c.Request.Method, c.Request.URL.Path, c.UserAgent(), c.Request.Proto, err)
|
||||
bodySize = -1
|
||||
bodySize = -1 // 无法解析则设置为 -1
|
||||
}
|
||||
// 如果内容大小超出限制, 返回 301 重定向到原始上游URL
|
||||
if err == nil && bodySize > sizelimit {
|
||||
finalURL := resp.Request.URL.String()
|
||||
err = resp.Body.Close()
|
||||
if err != nil {
|
||||
c.Errorf("Failed to close response body: %v", err)
|
||||
}
|
||||
_ = resp.Body.Close() // 明确关闭响应体, 因为我们将重定向而不是流式传输
|
||||
c.Redirect(301, finalURL)
|
||||
c.Warnf("%s %s %s %s %s Final-URL: %s Size-Limit-Exceeded: %d", c.ClientIP(), c.Request.Method, c.Request.URL.Path, c.UserAgent(), c.Request.Proto, finalURL, bodySize)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// 将上游响应头部复制到客户端响应
|
||||
c.SetHeaders(resp.Header)
|
||||
|
||||
// 设置客户端响应状态码
|
||||
c.Status(resp.StatusCode)
|
||||
|
||||
// bodyReader 的所有权将转移给 SetBodyStream, 不再由此函数管理关闭
|
||||
bodyReader := resp.Body
|
||||
|
||||
// 如果启用了带宽限制, 则使用限速读取器
|
||||
if cfg.RateLimit.BandwidthLimit.Enabled {
|
||||
bodyReader = limitreader.NewRateLimitedReader(bodyReader, bandwidthLimit, int(bandwidthBurst), ctx)
|
||||
}
|
||||
|
||||
// 根据 Content-Length 设置响应体流
|
||||
if contentLength != "" {
|
||||
c.SetBodyStream(bodyReader, bodySize)
|
||||
return
|
||||
}
|
||||
c.SetBodyStream(bodyReader, -1)
|
||||
|
||||
}
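The bandwidth-limited branch above hands the body to `limitreader.NewRateLimitedReader`, whose source is not part of this changeset. The sketch below illustrates the general token-bucket idea behind such a wrapper using `golang.org/x/time/rate`; the package, type, and constructor names here are invented for illustration and do not claim to match the real `limitreader` API.

```go
package sketch

import (
	"context"
	"io"

	"golang.org/x/time/rate"
)

// rateLimitedReader is an illustrative sketch only (not the project's limitreader).
// It throttles reads with a token bucket: after each Read it waits until the
// limiter releases enough tokens for the bytes just returned.
type rateLimitedReader struct {
	r       io.Reader
	limiter *rate.Limiter
	ctx     context.Context
}

func newRateLimitedReader(r io.Reader, bytesPerSec float64, burst int, ctx context.Context) io.Reader {
	return &rateLimitedReader{r: r, limiter: rate.NewLimiter(rate.Limit(bytesPerSec), burst), ctx: ctx}
}

func (l *rateLimitedReader) Read(p []byte) (int, error) {
	// Never request more tokens than the burst size, otherwise WaitN fails.
	if len(p) > l.limiter.Burst() {
		p = p[:l.limiter.Burst()]
	}
	n, err := l.r.Read(p)
	if n > 0 {
		if waitErr := l.limiter.WaitN(l.ctx, n); waitErr != nil {
			return n, waitErr
		}
	}
	return n, err
}
```

Carrying the request context, as the call in GhcrRequest does, lets a cancelled download stop waiting for tokens immediately.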
|
||||
|
||||
// AuthToken 用于解析认证响应中的令牌
|
||||
type AuthToken struct {
|
||||
Token string `json:"token"`
|
||||
}
|
||||
|
||||
// ChallengeReq 执行认证挑战流程, 获取新的认证令牌
|
||||
func ChallengeReq(target string, image *imageInfo, ctx context.Context, c *touka.Context) (token string) {
|
||||
var resp401 *http.Response
|
||||
var req401 *http.Request
|
||||
var err error
|
||||
ghcrclient := c.GetHTTPC()
|
||||
|
||||
// 对 /v2/ 端点发送 GET 请求以触发认证挑战
|
||||
rb401 := ghcrclient.NewRequestBuilder("GET", "https://"+target+"/v2/")
|
||||
rb401.NoDefaultHeaders()
|
||||
rb401.WithContext(ctx)
|
||||
rb401.AddHeader("User-Agent", "docker/28.1.1 go/go1.23.8 git-commit/01f442b kernel/6.12.25-amd64 os/linux arch/amd64 UpstreamClient(Docker-Client/28.1.1 ")
|
||||
req401, err = rb401.Build()
|
||||
if err != nil {
|
||||
HandleError(c, fmt.Sprintf("Failed to create request: %v", err))
|
||||
return
|
||||
}
|
||||
req401.Header.Set("Host", target)
|
||||
req401.Header.Set("Host", target) // 设置 Host 头部
|
||||
|
||||
resp401, err = ghcrclient.Do(req401)
|
||||
if err != nil {
|
||||
HandleError(c, fmt.Sprintf("Failed to send request: %v", err))
|
||||
return
|
||||
}
|
||||
defer resp401.Body.Close()
|
||||
defer resp401.Body.Close() // 确保响应体关闭
|
||||
|
||||
// 解析 Www-Authenticate 头部, 获取认证领域和参数
|
||||
bearer, err := parseBearerWWWAuthenticateHeader(resp401.Header.Get("Www-Authenticate"))
|
||||
if err != nil {
|
||||
c.Errorf("Failed to parse Www-Authenticate header: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// 构建认证范围 (scope), 通常是 repository:<image_name>:pull
|
||||
scope := fmt.Sprintf("repository:%s:pull", image.Image)
|
||||
|
||||
// 使用解析到的 Realm 和 Service, 以及 scope 请求认证令牌
|
||||
getAuthRB := ghcrclient.NewRequestBuilder("GET", bearer.Realm).
|
||||
NoDefaultHeaders().
|
||||
WithContext(ctx).
|
||||
AddHeader("User-Agent", "docker/28.1.1 go/go1.23.8 git-commit/01f442b kernel/6.12.25-amd64 os/linux arch/amd64 UpstreamClient(Docker-Client/28.1.1 ").
|
||||
SetHeader("Host", bearer.Service).
|
||||
AddQueryParam("service", bearer.Service).
|
||||
AddQueryParam("scope", scope)
|
||||
@@ -307,24 +478,23 @@ func ChallengeReq(target string, image *imageInfo, ctx context.Context, c *touka
|
||||
c.Errorf("Failed to send request: %v", err)
|
||||
return
|
||||
}
|
||||
defer authResp.Body.Close() // 确保响应体关闭
|
||||
|
||||
defer authResp.Body.Close()
|
||||
|
||||
bodyBytes, err := io.ReadAll(authResp.Body)
|
||||
// 读取认证响应体
|
||||
bodyBytes, err := iox.ReadAll(authResp.Body)
|
||||
if err != nil {
|
||||
c.Errorf("Failed to read auth response body: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// 解码json
|
||||
// 解码 JSON 响应以获取令牌
|
||||
var authToken AuthToken
|
||||
err = json.Unmarshal(bodyBytes, &authToken)
|
||||
if err != nil {
|
||||
c.Errorf("Failed to decode auth response body: %v", err)
|
||||
return
|
||||
}
|
||||
token = authToken.Token
|
||||
token = authToken.Token // 提取令牌
|
||||
|
||||
return token
|
||||
|
||||
}
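ChallengeReq relies on `parseBearerWWWAuthenticateHeader`, which does not appear in this diff. For orientation, a registry challenge typically looks like `Bearer realm="https://ghcr.io/token",service="ghcr.io"`, and a simple parser for it could look like the sketch below; the type and function names are hypothetical and not the project's own implementation.

```go
package sketch

import (
	"fmt"
	"strings"
)

// bearerChallenge holds the fields ChallengeReq consumes: the token endpoint
// (realm) and the service name that is sent back as a query parameter.
// Illustrative only; the real helper is not shown in this changeset.
type bearerChallenge struct {
	Realm   string
	Service string
	Scope   string
}

func parseBearerChallenge(header string) (bearerChallenge, error) {
	var ch bearerChallenge
	const prefix = "Bearer "
	if !strings.HasPrefix(header, prefix) {
		return ch, fmt.Errorf("not a Bearer challenge: %q", header)
	}
	// Naive comma split; good enough when parameter values contain no commas.
	for _, kv := range strings.Split(header[len(prefix):], ",") {
		parts := strings.SplitN(strings.TrimSpace(kv), "=", 2)
		if len(parts) != 2 {
			continue
		}
		val := strings.Trim(parts[1], `"`)
		switch parts[0] {
		case "realm":
			ch.Realm = val
		case "service":
			ch.Service = val
		case "scope":
			ch.Scope = val
		}
	}
	if ch.Realm == "" {
		return ch, fmt.Errorf("missing realm in Www-Authenticate header")
	}
	return ch, nil
}
```

Once Realm and Service are extracted, the token request above simply forwards them as the `service` and `scope` query parameters.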
|
||||
|
||||
@@ -21,7 +21,6 @@ func HandleError(c *touka.Context, message string) {
|
||||
}
|
||||
|
||||
func UnifiedToukaErrorHandler(c *touka.Context, code int, err error) {
|
||||
|
||||
errMsg := ""
|
||||
if err != nil {
|
||||
errMsg = err.Error()
|
||||
|
||||
@@ -9,6 +9,32 @@ import (
|
||||
"github.com/infinite-iroha/touka"
|
||||
)
|
||||
|
||||
// buildHandlerPath 使用 strings.Builder 来高效地构建最终的 URL.
// 这避免了使用标准字符串拼接时发生的多次内存分配.
func buildHandlerPath(path, matcher string) string {
var sb strings.Builder
sb.Grow(len(path) + 50)

if matcher == "blob" && strings.HasPrefix(path, "github.com") {
sb.WriteString("https://raw.githubusercontent.com")
if len(path) > 10 { // len("github.com")
pathSegment := path[10:] // skip "github.com"
if i := strings.Index(pathSegment, "/blob/"); i != -1 {
sb.WriteString(pathSegment[:i])
sb.WriteString("/")
sb.WriteString(pathSegment[i+len("/blob/"):])
} else {
sb.WriteString(pathSegment)
}
}
} else {
sb.WriteString("https://")
sb.WriteString(path)
}

return sb.String()
}
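A quick, illustrative check of the builder's two branches (assuming, as the neighbouring files suggest, that this handler lives in the `proxy` package; the test itself is not part of this changeset):

```go
package proxy

import "testing"

// TestBuildHandlerPathExample is an illustrative check, not part of this changeset.
func TestBuildHandlerPathExample(t *testing.T) {
	// A github.com blob link is rewritten straight to its raw.githubusercontent.com form.
	got := buildHandlerPath("github.com/owner/repo/blob/main/README.md", "blob")
	want := "https://raw.githubusercontent.com/owner/repo/main/README.md"
	if got != want {
		t.Fatalf("blob rewrite: got %q, want %q", got, want)
	}

	// Any other matcher only gets the https:// scheme prepended.
	got = buildHandlerPath("github.com/owner/repo/releases/download/v1.0/asset.zip", "releases")
	want = "https://github.com/owner/repo/releases/download/v1.0/asset.zip"
	if got != want {
		t.Fatalf("releases path: got %q, want %q", got, want)
	}
}
```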
|
||||
|
||||
var re = regexp.MustCompile(`^(http:|https:)?/?/?(.*)`) // 匹配http://或https://开头的路径
|
||||
|
||||
func NoRouteHandler(cfg *config.Config) touka.HandlerFunc {
|
||||
@@ -32,21 +58,16 @@ func NoRouteHandler(cfg *config.Config) touka.HandlerFunc {
|
||||
}
|
||||
|
||||
// 制作url
|
||||
rawPath = "https://" + matches[2]
|
||||
|
||||
var (
|
||||
user string
|
||||
repo string
|
||||
matcher string
|
||||
)
|
||||
|
||||
path := matches[2]
|
||||
var matcherErr *GHProxyErrors
|
||||
user, repo, matcher, matcherErr = Matcher(rawPath, cfg)
|
||||
user, repo, matcher, matcherErr := Matcher("https://"+path, cfg)
|
||||
if matcherErr != nil {
|
||||
ErrorPage(c, matcherErr)
|
||||
return
|
||||
}
|
||||
|
||||
rawPath = buildHandlerPath(path, matcher)
|
||||
|
||||
shoudBreak = listCheck(cfg, c, user, repo, rawPath)
|
||||
if shoudBreak {
|
||||
return
|
||||
@@ -57,11 +78,7 @@ func NoRouteHandler(cfg *config.Config) touka.HandlerFunc {
|
||||
return
|
||||
}
|
||||
|
||||
// 处理blob/raw路径
|
||||
if matcher == "blob" {
|
||||
rawPath = rawPath[18:]
|
||||
rawPath = "https://raw.githubusercontent.com" + rawPath
|
||||
rawPath = strings.Replace(rawPath, "/blob/", "/", 1)
|
||||
matcher = "raw"
|
||||
}
|
||||
|
||||
|
||||
150
proxy/match.go
@@ -10,11 +10,6 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
githubPrefix = "https://github.com/"
|
||||
rawPrefix = "https://raw.githubusercontent.com/"
|
||||
gistPrefix = "https://gist.github.com/"
|
||||
gistContentPrefix = "https://gist.githubusercontent.com/"
|
||||
apiPrefix = "https://api.github.com/"
|
||||
githubPrefixLen int
|
||||
rawPrefixLen int
|
||||
gistPrefixLen int
|
||||
@@ -22,6 +17,16 @@ var (
|
||||
apiPrefixLen int
|
||||
)
|
||||
|
||||
const (
|
||||
githubPrefix = "https://github.com/"
|
||||
rawPrefix = "https://raw.githubusercontent.com/"
|
||||
gistPrefix = "https://gist.github.com/"
|
||||
gistContentPrefix = "https://gist.githubusercontent.com/"
|
||||
apiPrefix = "https://api.github.com/"
|
||||
ociv2Prefix = "https://v2/"
|
||||
releasesDownloadSnippet = "releases/download/"
|
||||
)
|
||||
|
||||
func init() {
|
||||
githubPrefixLen = len(githubPrefix)
|
||||
rawPrefixLen = len(rawPrefix)
|
||||
@@ -32,37 +37,70 @@ func init() {
|
||||
|
||||
// Matcher 从原始URL路径中高效地解析并匹配代理规则.
|
||||
func Matcher(rawPath string, cfg *config.Config) (string, string, string, *GHProxyErrors) {
|
||||
if len(rawPath) < 18 {
|
||||
return "", "", "", NewErrorWithStatusLookup(404, "path too short")
|
||||
}
|
||||
/*
|
||||
if len(rawPath) < 18 {
|
||||
return "", "", "", NewErrorWithStatusLookup(404, "path too short")
|
||||
}
|
||||
*/
|
||||
|
||||
// 匹配 "https://github.com/"
|
||||
if strings.HasPrefix(rawPath, githubPrefix) {
|
||||
remaining := rawPath[githubPrefixLen:]
|
||||
i := strings.IndexByte(remaining, '/')
|
||||
pathAfterDomain := rawPath[githubPrefixLen:]
|
||||
|
||||
// 解析 user
|
||||
i := strings.IndexByte(pathAfterDomain, '/')
|
||||
if i <= 0 {
|
||||
return "", "", "", NewErrorWithStatusLookup(400, "malformed github path: missing user")
|
||||
}
|
||||
user := remaining[:i]
|
||||
remaining = remaining[i+1:]
|
||||
i = strings.IndexByte(remaining, '/')
|
||||
user := pathAfterDomain[:i]
|
||||
pathAfterUser := pathAfterDomain[i+1:]
|
||||
|
||||
// 解析 repo
|
||||
i = strings.IndexByte(pathAfterUser, '/')
|
||||
if i <= 0 {
|
||||
return "", "", "", NewErrorWithStatusLookup(400, "malformed github path: missing repo")
|
||||
}
|
||||
repo := remaining[:i]
|
||||
remaining = remaining[i+1:]
|
||||
if len(remaining) == 0 {
|
||||
return "", "", "", NewErrorWithStatusLookup(400, "malformed github path: missing action")
|
||||
}
|
||||
i = strings.IndexByte(remaining, '/')
|
||||
action := remaining
|
||||
if i != -1 {
|
||||
action = remaining[:i]
|
||||
repo := pathAfterUser[:i]
|
||||
pathAfterRepo := pathAfterUser[i+1:]
|
||||
|
||||
if len(pathAfterRepo) == 0 {
|
||||
return "", "", "", NewErrorWithStatusLookup(400, "malformed github path: missing action")
|
||||
}
|
||||
|
||||
// 优先处理所有 "releases" 相关的下载路径
|
||||
if strings.HasPrefix(pathAfterRepo, "releases/") {
|
||||
// 情况 A: "releases/download/..."
|
||||
if strings.HasPrefix(pathAfterRepo, "releases/download/") {
|
||||
return user, repo, "releases", nil
|
||||
}
|
||||
// 情况 B: "releases/:tag/download/..."
|
||||
pathAfterReleases := pathAfterRepo[len("releases/"):]
|
||||
slashIndex := strings.IndexByte(pathAfterReleases, '/')
|
||||
if slashIndex > 0 { // 确保tag不为空
|
||||
pathAfterTag := pathAfterReleases[slashIndex+1:]
|
||||
if strings.HasPrefix(pathAfterTag, "download/") {
|
||||
return user, repo, "releases", nil
|
||||
}
|
||||
}
|
||||
// 如果不满足上述下载链接的结构, 则为网页浏览路径, 予以拒绝
|
||||
return "", "", "", NewErrorWithStatusLookup(400, "unsupported releases page, only download links are allowed")
|
||||
}
|
||||
|
||||
// 检查 "archive/" 路径
|
||||
if strings.HasPrefix(pathAfterRepo, "archive/") {
|
||||
// 根据测试用例, archive路径的matcher也应为releases
|
||||
return user, repo, "releases", nil
|
||||
}
|
||||
|
||||
// 如果不是下载路径, 则解析action并进行分类
|
||||
i = strings.IndexByte(pathAfterRepo, '/')
|
||||
action := pathAfterRepo
|
||||
if i != -1 {
|
||||
action = pathAfterRepo[:i]
|
||||
}
|
||||
|
||||
var matcher string
|
||||
switch action {
|
||||
case "releases", "archive":
|
||||
matcher = "releases"
|
||||
case "blob":
|
||||
matcher = "blob"
|
||||
case "raw":
|
||||
@@ -78,59 +116,27 @@ func Matcher(rawPath string, cfg *config.Config) (string, string, string, *GHPro
|
||||
// 匹配 "https://raw.githubusercontent.com/"
|
||||
if strings.HasPrefix(rawPath, rawPrefix) {
|
||||
remaining := rawPath[rawPrefixLen:]
|
||||
// 这里的逻辑与 github.com 的类似, 需要提取 user, repo, branch, file...
|
||||
// 我们只需要 user 和 repo
|
||||
i := strings.IndexByte(remaining, '/')
|
||||
if i <= 0 {
|
||||
return "", "", "", NewErrorWithStatusLookup(400, "malformed raw url: missing user")
|
||||
parts := strings.SplitN(remaining, "/", 3)
|
||||
if len(parts) < 3 {
|
||||
return "", "", "", NewErrorWithStatusLookup(400, "malformed raw url: path too short")
|
||||
}
|
||||
user := remaining[:i]
|
||||
remaining = remaining[i+1:]
|
||||
i = strings.IndexByte(remaining, '/')
|
||||
if i <= 0 {
|
||||
return "", "", "", NewErrorWithStatusLookup(400, "malformed raw url: missing repo")
|
||||
}
|
||||
repo := remaining[:i]
|
||||
// raw 链接至少需要 user/repo/branch 三部分
|
||||
remaining = remaining[i+1:]
|
||||
if len(remaining) == 0 {
|
||||
return "", "", "", NewErrorWithStatusLookup(400, "malformed raw url: missing branch/commit")
|
||||
}
|
||||
return user, repo, "raw", nil
|
||||
return parts[0], parts[1], "raw", nil
|
||||
}
|
||||
|
||||
// 匹配 "https://gist.github.com/"
|
||||
if strings.HasPrefix(rawPath, gistPrefix) {
|
||||
remaining := rawPath[gistPrefixLen:]
|
||||
i := strings.IndexByte(remaining, '/')
|
||||
if i <= 0 {
|
||||
// case: https://gist.github.com/user
|
||||
// 这种情况下, gist_id 缺失, 但我们仍然可以认为 user 是有效的
|
||||
if len(remaining) > 0 {
|
||||
return remaining, "", "gist", nil
|
||||
}
|
||||
// 匹配 "https://gist.github.com/" 或 "https://gist.githubusercontent.com/"
|
||||
isGist := strings.HasPrefix(rawPath, gistPrefix)
|
||||
if isGist || strings.HasPrefix(rawPath, gistContentPrefix) {
|
||||
var remaining string
|
||||
if isGist {
|
||||
remaining = rawPath[gistPrefixLen:]
|
||||
} else {
|
||||
remaining = rawPath[gistContentPrefixLen:]
|
||||
}
|
||||
parts := strings.SplitN(remaining, "/", 2)
|
||||
if len(parts) == 0 || parts[0] == "" {
|
||||
return "", "", "", NewErrorWithStatusLookup(400, "malformed gist url: missing user")
|
||||
}
|
||||
// case: https://gist.github.com/user/gist_id...
|
||||
user := remaining[:i]
|
||||
return user, "", "gist", nil
|
||||
}
|
||||
|
||||
// 匹配 "https://gist.githubusercontent.com/"
|
||||
if strings.HasPrefix(rawPath, gistContentPrefix) {
|
||||
remaining := rawPath[gistContentPrefixLen:]
|
||||
i := strings.IndexByte(remaining, '/')
|
||||
if i <= 0 {
|
||||
// case: https://gist.githubusercontent.com/user
|
||||
// 这种情况下, gist_id 缺失, 但我们仍然可以认为 user 是有效的
|
||||
if len(remaining) > 0 {
|
||||
return remaining, "", "gist", nil
|
||||
}
|
||||
return "", "", "", NewErrorWithStatusLookup(400, "malformed gist url: missing user")
|
||||
}
|
||||
// case: https://gist.githubusercontent.com/user/gist_id...
|
||||
user := remaining[:i]
|
||||
return user, "", "gist", nil
|
||||
return parts[0], "", "gist", nil
|
||||
}
|
||||
|
||||
// 匹配 "https://api.github.com/"
|
||||
|
||||
@@ -33,11 +33,29 @@ func TestMatcher_Compatibility(t *testing.T) {
|
||||
expectedErrCode int
|
||||
}{
|
||||
{
|
||||
name: "GH Releases Path",
|
||||
name: "GH Releases Path 1",
|
||||
rawPath: "https://github.com/owner/repo/releases/download/v1.0/asset.zip",
|
||||
config: cfgWithAuth,
|
||||
expectedUser: "owner", expectedRepo: "repo", expectedMatcher: "releases",
|
||||
},
|
||||
{
|
||||
name: "GH Releases Path 2",
|
||||
rawPath: "https://github.com/owner/repo/releases/v1.0/download/asset.zip",
|
||||
config: cfgWithAuth,
|
||||
expectedUser: "owner", expectedRepo: "repo", expectedMatcher: "releases",
|
||||
},
|
||||
{
|
||||
name: "GH Releases Path Page",
|
||||
rawPath: "https://github.com/owner/repo/releases",
|
||||
config: cfgWithAuth,
|
||||
expectError: true, expectedErrCode: 400,
|
||||
},
|
||||
{
|
||||
name: "GH Releases Path Tag Page",
|
||||
rawPath: "https://github.com/owner/repo/releases/tag/v0.0.1",
|
||||
config: cfgWithAuth,
|
||||
expectError: true, expectedErrCode: 400,
|
||||
},
|
||||
{
|
||||
name: "GH Archive Path",
|
||||
rawPath: "https://github.com/owner/repo.git/archive/main.zip",
|
||||
|
||||
259
proxy/nest.go
@@ -2,15 +2,78 @@ package proxy
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"ghproxy/config"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/infinite-iroha/touka"
|
||||
)
|
||||
|
||||
var (
|
||||
prefixGithub = []byte("https://github.com")
|
||||
prefixRawUser = []byte("https://raw.githubusercontent.com")
|
||||
prefixRaw = []byte("https://raw.github.com")
|
||||
prefixGistUser = []byte("https://gist.githubusercontent.com")
|
||||
prefixGist = []byte("https://gist.github.com")
|
||||
prefixAPI = []byte("https://api.github.com")
|
||||
prefixHTTP = []byte("http://")
|
||||
prefixHTTPS = []byte("https://")
|
||||
)
|
||||
|
||||
func EditorMatcherBytes(rawPath []byte, cfg *config.Config) (bool, error) {
|
||||
if bytes.HasPrefix(rawPath, prefixGithub) {
|
||||
return true, nil
|
||||
}
|
||||
if bytes.HasPrefix(rawPath, prefixRawUser) {
|
||||
return true, nil
|
||||
}
|
||||
if bytes.HasPrefix(rawPath, prefixRaw) {
|
||||
return true, nil
|
||||
}
|
||||
if bytes.HasPrefix(rawPath, prefixGistUser) {
|
||||
return true, nil
|
||||
}
|
||||
if bytes.HasPrefix(rawPath, prefixGist) {
|
||||
return true, nil
|
||||
}
|
||||
if cfg.Shell.RewriteAPI {
|
||||
if bytes.HasPrefix(rawPath, prefixAPI) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func modifyURLBytes(url []byte, host []byte, cfg *config.Config) []byte {
matched, err := EditorMatcherBytes(url, cfg)
if err != nil || !matched {
return url
}

var u []byte
if bytes.HasPrefix(url, prefixHTTPS) {
u = url[len(prefixHTTPS):]
} else if bytes.HasPrefix(url, prefixHTTP) {
u = url[len(prefixHTTP):]
} else {
u = url
}

newLen := len(prefixHTTPS) + len(host) + 1 + len(u)
newURL := make([]byte, newLen)

written := 0
written += copy(newURL[written:], prefixHTTPS)
written += copy(newURL[written:], host)
written += copy(newURL[written:], []byte("/"))
copy(newURL[written:], u)

return newURL
}
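An illustrative check of the byte-level rewrite (the proxy host below is a placeholder; the test is not part of this changeset):

```go
package proxy

import (
	"bytes"
	"ghproxy/config"
	"testing"
)

// TestModifyURLBytesExample is an illustrative check, not part of this changeset.
func TestModifyURLBytesExample(t *testing.T) {
	cfg := &config.Config{} // Shell.RewriteAPI is false, so api.github.com links pass through
	host := []byte("proxy.example.com")

	got := modifyURLBytes([]byte("https://github.com/owner/repo"), host, cfg)
	want := []byte("https://proxy.example.com/github.com/owner/repo")
	if !bytes.Equal(got, want) {
		t.Fatalf("got %q, want %q", got, want)
	}

	// URLs without a recognised GitHub prefix are returned untouched.
	if out := modifyURLBytes([]byte("https://example.com/file"), host, cfg); !bytes.Equal(out, []byte("https://example.com/file")) {
		t.Fatalf("unexpected rewrite: %q", out)
	}
}
```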
|
||||
|
||||
func EditorMatcher(rawPath string, cfg *config.Config) (bool, error) {
|
||||
// 匹配 "https://github.com"开头的链接
|
||||
if strings.HasPrefix(rawPath, "https://github.com") {
|
||||
@@ -65,116 +128,126 @@ func modifyURL(url string, host string, cfg *config.Config) string {
|
||||
return url
|
||||
}
|
||||
|
||||
// processLinks 处理链接,返回包含处理后数据的 io.Reader
|
||||
func processLinks(input io.ReadCloser, compress string, host string, cfg *config.Config, c *touka.Context) (readerOut io.Reader, written int64, err error) {
|
||||
pipeReader, pipeWriter := io.Pipe() // 创建 io.Pipe
|
||||
var bufferPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(bytes.Buffer)
|
||||
},
|
||||
}
|
||||
|
||||
// processLinksStreamingInternal is a link processing function that reads the input line by line.
|
||||
// It is memory-safe for large files but less performant due to numerous small allocations.
|
||||
func processLinksStreamingInternal(input io.ReadCloser, host string, cfg *config.Config, c *touka.Context) (readerOut io.Reader, written int64, err error) {
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
readerOut = pipeReader
|
||||
|
||||
go func() { // 在 Goroutine 中执行写入操作
|
||||
go func() {
|
||||
defer func() {
|
||||
if pipeWriter != nil { // 确保 pipeWriter 关闭,即使发生错误
|
||||
if err != nil {
|
||||
if closeErr := pipeWriter.CloseWithError(err); closeErr != nil { // 如果有错误,传递错误给 reader
|
||||
c.Errorf("pipeWriter close with error failed: %v, original error: %v", closeErr, err)
|
||||
}
|
||||
} else {
|
||||
if closeErr := pipeWriter.Close(); closeErr != nil { // 没有错误,正常关闭
|
||||
c.Errorf("pipeWriter close failed: %v", closeErr)
|
||||
if err == nil { // 如果之前没有错误,记录关闭错误
|
||||
err = closeErr
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
pipeWriter.CloseWithError(err)
|
||||
} else {
|
||||
pipeWriter.Close()
|
||||
}
|
||||
}()
|
||||
defer input.Close()
|
||||
|
||||
defer func() {
|
||||
if err := input.Close(); err != nil {
|
||||
c.Errorf("input close failed: %v", err)
|
||||
}
|
||||
bufReader := bufio.NewReader(input)
|
||||
bufWriter := bufio.NewWriterSize(pipeWriter, 4096)
|
||||
defer bufWriter.Flush()
|
||||
|
||||
}()
|
||||
|
||||
var bufReader *bufio.Reader
|
||||
|
||||
if compress == "gzip" {
|
||||
// 解压gzip
|
||||
gzipReader, gzipErr := gzip.NewReader(input)
|
||||
if gzipErr != nil {
|
||||
err = fmt.Errorf("gzip解压错误: %v", gzipErr)
|
||||
return // Goroutine 中使用 return 返回错误
|
||||
}
|
||||
defer gzipReader.Close()
|
||||
bufReader = bufio.NewReader(gzipReader)
|
||||
} else {
|
||||
bufReader = bufio.NewReader(input)
|
||||
}
|
||||
|
||||
var bufWriter *bufio.Writer
|
||||
var gzipWriter *gzip.Writer
|
||||
|
||||
// 根据是否gzip确定 writer 的创建
|
||||
if compress == "gzip" {
|
||||
gzipWriter = gzip.NewWriter(pipeWriter) // 使用 pipeWriter
|
||||
bufWriter = bufio.NewWriterSize(gzipWriter, 4096) //设置缓冲区大小
|
||||
} else {
|
||||
bufWriter = bufio.NewWriterSize(pipeWriter, 4096) // 使用 pipeWriter
|
||||
}
|
||||
|
||||
//确保writer关闭
|
||||
defer func() {
|
||||
var closeErr error // 局部变量,用于保存defer中可能发生的错误
|
||||
|
||||
if gzipWriter != nil {
|
||||
if closeErr = gzipWriter.Close(); closeErr != nil {
|
||||
c.Errorf("gzipWriter close failed %v", closeErr)
|
||||
// 如果已经存在错误,则保留。否则,记录此错误。
|
||||
if err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}
|
||||
}
|
||||
if flushErr := bufWriter.Flush(); flushErr != nil {
|
||||
c.Errorf("writer flush failed %v", flushErr)
|
||||
// 如果已经存在错误,则保留。否则,记录此错误。
|
||||
if err == nil {
|
||||
err = flushErr
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// 使用正则表达式匹配 http 和 https 链接
|
||||
for {
|
||||
line, readErr := bufReader.ReadString('\n')
|
||||
if readErr != nil {
|
||||
if readErr == io.EOF {
|
||||
break // 文件结束
|
||||
}
|
||||
err = fmt.Errorf("读取行错误: %v", readErr) // 传递错误
|
||||
return // Goroutine 中使用 return 返回错误
|
||||
if readErr != nil && readErr != io.EOF {
|
||||
err = fmt.Errorf("read error: %w", readErr)
|
||||
return
|
||||
}
|
||||
|
||||
// 替换所有匹配的 URL
|
||||
modifiedLine := urlPattern.ReplaceAllStringFunc(line, func(originalURL string) string {
|
||||
return modifyURL(originalURL, host, cfg) // 假设 modifyURL 函数已定义
|
||||
return modifyURL(originalURL, host, cfg)
|
||||
})
|
||||
|
||||
n, writeErr := bufWriter.WriteString(modifiedLine)
|
||||
written += int64(n) // 更新写入的字节数
|
||||
if writeErr != nil {
|
||||
err = fmt.Errorf("写入文件错误: %v", writeErr) // 传递错误
|
||||
return // Goroutine 中使用 return 返回错误
|
||||
var n int
|
||||
n, err = bufWriter.WriteString(modifiedLine)
|
||||
written += int64(n)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("write error: %w", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// 在返回之前,再刷新一次 (虽然 defer 中已经有 flush,但这里再加一次确保及时刷新)
|
||||
if flushErr := bufWriter.Flush(); flushErr != nil {
|
||||
if err == nil { // 避免覆盖之前的错误
|
||||
err = flushErr
|
||||
if readErr == io.EOF {
|
||||
break
|
||||
}
|
||||
return // Goroutine 中使用 return 返回错误
|
||||
}
|
||||
}()
|
||||
|
||||
return readerOut, written, nil // 返回 reader 和 written,error 由 Goroutine 通过 pipeWriter.CloseWithError 传递
|
||||
return readerOut, written, nil
|
||||
}
|
||||
|
||||
// processLinks acts as a dispatcher, choosing the best processing strategy based on file size.
// It uses a memory-safe streaming approach for large or unknown-size files,
// and a high-performance buffered approach for smaller files.
func processLinks(input io.ReadCloser, host string, cfg *config.Config, c *touka.Context, bodySize int) (readerOut io.Reader, written int64, err error) {
const sizeThreshold = 256 * 1024 // 256KB

// Use streaming for large or unknown size files to prevent OOM
if bodySize == -1 || bodySize > sizeThreshold {
c.Debugf("Using streaming processor for large/unknown size file (%d bytes)", bodySize)
return processLinksStreamingInternal(input, host, cfg, c)
} else {
c.Debugf("Using buffered processor for small file (%d bytes)", bodySize)
return processLinksBufferedInternal(input, host, cfg, c)
}
}
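The call site that feeds the dispatcher is not included in this changeset. The sketch below shows how a caller could derive `bodySize` from the upstream `Content-Length` and stream the rewritten body back; it assumes the helpers shown elsewhere in this diff (`HandleError`, `SetBodyStream`) and the `proxy` package, and the function name is hypothetical.

```go
package proxy

import (
	"fmt"
	"net/http"
	"strconv"

	"ghproxy/config"

	"github.com/infinite-iroha/touka"
)

// sendProcessedBody sketches how a caller might feed an upstream response
// through the dispatcher; the real call site is not part of this changeset.
func sendProcessedBody(c *touka.Context, cfg *config.Config, resp *http.Response) {
	bodySize := -1 // unknown length falls back to the streaming processor
	if cl := resp.Header.Get("Content-Length"); cl != "" {
		if n, err := strconv.Atoi(cl); err == nil {
			bodySize = n
		}
	}

	reader, _, err := processLinks(resp.Body, c.Request.Host, cfg, c, bodySize)
	if err != nil {
		HandleError(c, fmt.Sprintf("Failed to process links: %v", err))
		return
	}
	// Rewriting changes the body length, so stream it without a fixed Content-Length.
	c.SetBodyStream(reader, -1)
}
```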
|
||||
|
||||
// processLinksBufferedInternal a link processing function that reads the entire content into a buffer.
|
||||
// It is optimized for performance on smaller files but carries an OOM risk for large files.
|
||||
func processLinksBufferedInternal(input io.ReadCloser, host string, cfg *config.Config, c *touka.Context) (readerOut io.Reader, written int64, err error) {
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
readerOut = pipeReader
|
||||
hostBytes := []byte(host)
|
||||
|
||||
go func() {
|
||||
// 在 goroutine 退出时, 根据 err 是否为 nil, 带错误或正常关闭 pipeWriter
|
||||
defer func() {
|
||||
if closeErr := input.Close(); closeErr != nil {
|
||||
c.Errorf("input close failed: %v", closeErr)
|
||||
}
|
||||
}()
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if closeErr := pipeWriter.CloseWithError(err); closeErr != nil {
|
||||
c.Errorf("pipeWriter close with error failed: %v", closeErr)
|
||||
}
|
||||
} else {
|
||||
if closeErr := pipeWriter.Close(); closeErr != nil {
|
||||
c.Errorf("pipeWriter close failed: %v", closeErr)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
buf := bufferPool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
defer bufferPool.Put(buf)
|
||||
|
||||
// 将全部输入读入复用的缓冲区
|
||||
if _, err = buf.ReadFrom(input); err != nil {
|
||||
err = fmt.Errorf("reading input failed: %w", err)
|
||||
return
|
||||
}
|
||||
|
||||
// 使用 ReplaceAllFunc 和字节版本辅助函数, 实现准零分配
|
||||
modifiedBytes := urlPattern.ReplaceAllFunc(buf.Bytes(), func(originalURL []byte) []byte {
|
||||
return modifyURLBytes(originalURL, hostBytes, cfg)
|
||||
})
|
||||
|
||||
// 将处理后的字节写回管道
|
||||
var n int
|
||||
n, err = pipeWriter.Write(modifiedBytes)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("writing to pipe failed: %w", err)
|
||||
return
|
||||
}
|
||||
written = int64(n)
|
||||
}()
|
||||
|
||||
return readerOut, written, nil
|
||||
}
|
||||
|
||||
65
proxy/nest_bench_test.go
Normal file
@@ -0,0 +1,65 @@
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"ghproxy/config"
|
||||
"io"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const benchmarkInput = `
|
||||
Some text here.
|
||||
Link to be replaced: http://github.com/user/repo
|
||||
Another link: https://google.com
|
||||
And one more: http://example.com/some/path
|
||||
This should not be replaced: notalink
|
||||
End of text.
|
||||
`
|
||||
|
||||
func BenchmarkProcessLinksStreaming(b *testing.B) {
|
||||
cfg := &config.Config{}
|
||||
host := "my-proxy.com"
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
b.StopTimer()
|
||||
input := io.NopCloser(strings.NewReader(benchmarkInput))
|
||||
b.StartTimer()
|
||||
|
||||
reader, _, err := processLinksStreamingInternal(input, host, cfg, nil)
|
||||
if err != nil {
|
||||
b.Fatalf("processLinksStreamingInternal failed: %v", err)
|
||||
}
|
||||
|
||||
_, err = io.ReadAll(reader)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to read from processed reader: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkProcessLinksBuffered(b *testing.B) {
|
||||
cfg := &config.Config{}
|
||||
host := "my-proxy.com"
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
b.StopTimer()
|
||||
input := io.NopCloser(strings.NewReader(benchmarkInput))
|
||||
b.StartTimer()
|
||||
|
||||
reader, _, err := processLinksBufferedInternal(input, host, cfg, nil)
|
||||
if err != nil {
|
||||
b.Fatalf("processLinksBufferedInternal failed: %v", err)
|
||||
}
|
||||
|
||||
_, err = io.ReadAll(reader)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to read from processed reader: %v", err)
|
||||
}
|
||||
}
|
||||
}
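Both benchmarks push the same `benchmarkInput` through the two code paths, so running `go test -bench=BenchmarkProcessLinks -benchmem ./proxy` reports ns/op, B/op, and allocs/op for the streaming and buffered processors side by side.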
|
||||
@@ -27,6 +27,7 @@ var (
|
||||
"CDN-Loop": {},
|
||||
"Upgrade": {},
|
||||
"Connection": {},
|
||||
"Accept-Encoding": {},
|
||||
}
|
||||
|
||||
cloneHeadersToRemove = map[string]struct{}{
|
||||
@@ -43,7 +44,7 @@ var (
|
||||
var (
|
||||
defaultHeaders = map[string]string{
|
||||
"Accept": "*/*",
|
||||
"Accept-Encoding": "gzip",
|
||||
"Accept-Encoding": "",
|
||||
"Transfer-Encoding": "chunked",
|
||||
"User-Agent": "GHProxy/1.0",
|
||||
}
|
||||
|
||||
@@ -7,6 +7,33 @@ import (
|
||||
"github.com/infinite-iroha/touka"
|
||||
)
|
||||
|
||||
// buildRoutingPath 使用 strings.Builder 来高效地构建最终的 URL.
|
||||
// 这避免了使用标准字符串拼接时发生的多次内存分配.
|
||||
func buildRoutingPath(rawPath, matcher string) string {
|
||||
var sb strings.Builder
|
||||
// 预分配内存以提高性能
sb.Grow(len(rawPath) + 30)
|
||||
sb.WriteString("https://")
|
||||
|
||||
if matcher == "blob" {
|
||||
sb.WriteString("raw.githubusercontent.com")
|
||||
if len(rawPath) > 10 { // len("github.com")
|
||||
pathSegment := rawPath[10:]
|
||||
if i := strings.Index(pathSegment, "/blob/"); i != -1 {
|
||||
sb.WriteString(pathSegment[:i])
|
||||
sb.WriteString("/")
|
||||
sb.WriteString(pathSegment[i+len("/blob/"):])
|
||||
} else {
|
||||
sb.WriteString(pathSegment)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
sb.WriteString(rawPath)
|
||||
}
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
func RoutingHandler(cfg *config.Config) touka.HandlerFunc {
|
||||
return func(c *touka.Context) {
|
||||
|
||||
@@ -44,17 +71,11 @@ func RoutingHandler(cfg *config.Config) touka.HandlerFunc {
|
||||
return
|
||||
}
|
||||
|
||||
// 处理blob/raw路径
|
||||
rawPath = buildRoutingPath(rawPath, matcher)
|
||||
if matcher == "blob" {
|
||||
rawPath = rawPath[10:]
|
||||
rawPath = "raw.githubusercontent.com" + rawPath
|
||||
rawPath = strings.Replace(rawPath, "/blob/", "/", 1)
|
||||
matcher = "raw"
|
||||
}
|
||||
|
||||
// 为rawpath加入https:// 头
|
||||
rawPath = "https://" + rawPath
|
||||
|
||||
switch matcher {
|
||||
case "releases", "blob", "raw", "gist", "api":
|
||||
ChunkedProxyRequest(ctx, c, rawPath, cfg, matcher)