Compare commits

...

7 Commits
4.3.2 ... dev

Author SHA1 Message Date
WJQSERVER
112bf9a52c Update CHANGELOG.md
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-09-14 10:09:58 +08:00
wjqserver
3d05902824 update nest: use dispatcher to get lower allocs(4.3.5-rc.0) 2025-09-14 10:05:25 +08:00
wjqserver
3adc110298 update json/v2 2025-09-14 07:45:47 +08:00
wjqserver
ba33d5743f 4.3.4 2025-09-14 07:44:46 +08:00
wjqserver
bd9f590b0a 4.3.4 2025-09-14 07:31:41 +08:00
wjqserver
e3f84f4c17 fix retrun, change to false 2025-09-10 03:36:15 +08:00
wjqserver
4a7ad2ec75 4.3.3 2025-09-10 03:21:14 +08:00
12 changed files with 365 additions and 144 deletions

View File

@@ -1,5 +1,21 @@
# 更新日志
4.3.5-rc.0 - 2025-09-14
---
- PRE-RELEASE: v4.3.5-rc.0是v4.3.5的预发布版本,请勿在生产环境中使用;
- CHANGE: 改进`nest`实现, 减少内存分配`10371 B/op -> 1852 B/op` `43 allocs/op -> 14 allocs/op`
- CHANGE: 为`nest`加入`dispatcher`实现, 为不同情况分配适合的处理器以保证性能与兼容性
- CHANGE: 改进路径匹配热点的内存分配
4.3.4 - 2025-09-14
---
- CHANGE: 改进嵌套加速实现, 增强稳定性
4.3.3 - 2025-09-10
---
- CHANGE: 增强对[wanf](https://github.com/WJQSERVER/wanf)的支持
- CHANGE: 更新包括Touka框架在内的各个依赖版本
4.3.2 - 2025-08-20
---
- FIX: 修正`cfg.Pages.StaticDir`为空时的处置
@@ -16,7 +32,7 @@
4.3.0-rc.0 - 2025-08-11
---
- PRE-RELEASE: v4.3.0-rc.0是v4.3.0发布版本,请勿在生产环境中使用;
- PRE-RELEASE: v4.3.0-rc.0是v4.3.0的预发布版本,请勿在生产环境中使用;
- CHANGE: 为OCI镜像(Docker)代理带来自动library附加功能
- CHANGE(refactor): 改进OCI镜像(Docker)代理路径组成流程
- ADD: 新增[WANF](https://github.com/WJQSERVER/wanf)配置文件格式支持

View File

@@ -1 +1 @@
4.3.0-rc.0
4.3.5-rc.0

View File

@@ -1 +1 @@
4.3.2
4.3.4

View File

@@ -1,8 +1,10 @@
package config
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/BurntSushi/toml"
@@ -212,7 +214,8 @@ type DockerConfig struct {
// LoadConfig 从配置文件加载配置
func LoadConfig(filePath string) (*Config, error) {
if !FileExists(filePath) {
exist, filePath2read := FileExists(filePath)
if !exist {
// 楔入配置文件
err := DefaultConfig().WriteConfig(filePath)
if err != nil {
@@ -221,15 +224,15 @@ func LoadConfig(filePath string) (*Config, error) {
return DefaultConfig(), nil
}
var config Config
ext := filepath.Ext(filePath)
ext := filepath.Ext(filePath2read)
if ext == ".wanf" {
if err := wanf.DecodeFile(filePath, &config); err != nil {
if err := wanf.DecodeFile(filePath2read, &config); err != nil {
return nil, err
}
return &config, nil
}
if _, err := toml.DecodeFile(filePath, &config); err != nil {
if _, err := toml.DecodeFile(filePath2read, &config); err != nil {
return nil, err
}
return &config, nil
@@ -257,9 +260,37 @@ func (c *Config) WriteConfig(filePath string) error {
}
// FileExists reports whether a usable config file exists for filename.
// It returns the path that should actually be read: filename itself when it
// exists on disk, otherwise a sibling file with the same base name and a
// ".wanf" extension (fallback probe). The returned path is only meaningful
// when the boolean is true.
func FileExists(filename string) (bool, string) {
	_, err := os.Stat(filename)
	if err == nil {
		return true, filename
	}
	if !os.IsNotExist(err) {
		// Stat failed for a reason other than absence (e.g. permission);
		// report not-found and echo the original path back to the caller.
		return false, filename
	}
	// filename is absent: probe for a .wanf file with the same base name
	// in the same directory (e.g. config.toml -> config.wanf).
	dir := filepath.Dir(filename)
	base := filepath.Base(filename)
	body := strings.TrimSuffix(base, filepath.Ext(base))
	wanfFilename := filepath.Join(dir, body+".wanf")
	if _, err = os.Stat(wanfFilename); err == nil {
		// NOTE(review): prints straight to stdout as a side effect —
		// consider routing through the app logger instead.
		fmt.Printf("\n Found .wanf file: %s\n", wanfFilename)
		return true, wanfFilename
	}
	// The .wanf probe is absent or un-statable: nothing usable found.
	return false, ""
}
// DefaultConfig 返回默认配置结构体

10
go.mod
View File

@@ -1,12 +1,12 @@
module ghproxy
go 1.25
go 1.25.1
require (
github.com/BurntSushi/toml v1.5.0
github.com/WJQSERVER-STUDIO/httpc v0.8.2
golang.org/x/net v0.43.0
golang.org/x/time v0.12.0
golang.org/x/net v0.44.0
golang.org/x/time v0.13.0
)
require (
@@ -18,9 +18,9 @@ require (
github.com/fenthope/ipfilter v0.0.1
github.com/fenthope/reco v0.0.4
github.com/fenthope/record v0.0.4
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced
github.com/go-json-experiment/json v0.0.0-20250910080747-cc2cfa0554c3
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/infinite-iroha/touka v0.3.6
github.com/infinite-iroha/touka v0.3.7
github.com/wjqserver/modembed v0.0.1
)

18
go.sum
View File

@@ -18,17 +18,19 @@ github.com/fenthope/reco v0.0.4 h1:yo2g3aWwdoMpaZWZX4SdZOW7mCK82RQIU/YI8ZUQThM=
github.com/fenthope/reco v0.0.4/go.mod h1:eMyS8HpdMVdJ/2WJt6Cvt8P1EH9Igzj5lSJrgc+0jeg=
github.com/fenthope/record v0.0.4 h1:/1JHNCxiXGLL/qCh4LEGaAvhj4CcKsb6siTxjLmjdO4=
github.com/fenthope/record v0.0.4/go.mod h1:G0a6KCiCDyX2SsC3nfzSN651fJKxH482AyJvzlnvAJU=
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced h1:Q311OHjMh/u5E2TITc++WlTP5We0xNseRMkHDyvhW7I=
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M=
github.com/go-json-experiment/json v0.0.0-20250813233538-9b1f9ea2e11b h1:6Q4zRHXS/YLOl9Ng1b1OOOBWMidAQZR3Gel0UKPC/KU=
github.com/go-json-experiment/json v0.0.0-20250813233538-9b1f9ea2e11b/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M=
github.com/go-json-experiment/json v0.0.0-20250910080747-cc2cfa0554c3 h1:02WINGfSX5w0Mn+F28UyRoSt9uvMhKguwWMlOAh6U/0=
github.com/go-json-experiment/json v0.0.0-20250910080747-cc2cfa0554c3/go.mod h1:uNVvRXArCGbZ508SxYYTC5v1JWoz2voff5pm25jU1Ok=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/infinite-iroha/touka v0.3.6 h1:SkpM/VFGCWOFQP3RRuoWdX/Q4zafPngG1VMwkrLwtkw=
github.com/infinite-iroha/touka v0.3.6/go.mod h1:XW7a3fpLAjJfylSmdNuDQ8wGKkKmLVi9V/89sT1d7uw=
github.com/infinite-iroha/touka v0.3.7 h1:bIIZW5Weh7lVpyOWh4FmyR9UOfb5FOt+cR9yQ30FJLA=
github.com/infinite-iroha/touka v0.3.7/go.mod h1:uwkF1gTrNEgQ4P/Gwtk6WLbERehq3lzB8x1FMedyrfE=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/wjqserver/modembed v0.0.1 h1:8ZDz7t9M5DLrUFlYgBUUmrMzxWsZPmHvOazkr/T2jEs=
github.com/wjqserver/modembed v0.0.1/go.mod h1:sYbQJMAjSBsdYQrUsuHY380XXE1CuRh8g9yyCztTXOQ=
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=

View File

@@ -127,18 +127,14 @@ func ChunkedProxyRequest(ctx context.Context, c *touka.Context, u string, cfg *c
defer bodyReader.Close()
if MatcherShell(u) && matchString(matcher) && cfg.Shell.Editor {
// 判断body是不是gzip
var compress string
if resp.Header.Get("Content-Encoding") == "gzip" {
compress = "gzip"
}
c.Debugf("Use Shell Editor: %s %s %s %s %s", c.ClientIP(), c.Request.Method, u, c.UserAgent(), c.Request.Proto)
c.Header("Content-Length", "")
c.DelHeader("Content-Length")
c.DelHeader("Content-Encoding")
var reader io.Reader
reader, _, err = processLinks(bodyReader, compress, c.Request.Host, cfg, c)
reader, _, err = processLinks(bodyReader, c.Request.Host, cfg, c, bodySize)
c.WriteStream(reader)
if err != nil {
c.Errorf("%s %s %s %s %s Failed to copy response body: %v", c.ClientIP(), c.Request.Method, u, c.UserAgent(), c.Request.Proto, err)
@@ -146,7 +142,6 @@ func ChunkedProxyRequest(ctx context.Context, c *touka.Context, u string, cfg *c
return
}
} else {
if contentLength != "" {
c.SetHeader("Content-Length", contentLength)
c.WriteStream(bodyReader)

View File

@@ -9,6 +9,32 @@ import (
"github.com/infinite-iroha/touka"
)
// buildHandlerPath assembles the final upstream URL with a single
// strings.Builder allocation instead of repeated string concatenation.
// For matcher "blob" on a github.com path it rewrites the host to
// raw.githubusercontent.com and removes the first "/blob/" segment;
// every other path is simply prefixed with "https://".
func buildHandlerPath(path, matcher string) string {
	const ghHost = "github.com"
	var sb strings.Builder
	sb.Grow(len(path) + 50)
	// Require the "/" after the host so that hosts that merely start with
	// "github.com" (e.g. "github.com.evil.example") are never rewritten.
	if matcher == "blob" && strings.HasPrefix(path, ghHost+"/") {
		sb.WriteString("https://raw.githubusercontent.com")
		rest := path[len(ghHost):] // keeps the leading "/"
		if i := strings.Index(rest, "/blob/"); i != -1 {
			// Drop only the first "/blob/" segment, mirroring the old
			// strings.Replace(..., 1) behavior.
			sb.WriteString(rest[:i])
			sb.WriteString("/")
			sb.WriteString(rest[i+len("/blob/"):])
		} else {
			sb.WriteString(rest)
		}
	} else {
		sb.WriteString("https://")
		sb.WriteString(path)
	}
	return sb.String()
}
var re = regexp.MustCompile(`^(http:|https:)?/?/?(.*)`) // 匹配http://或https://开头的路径
func NoRouteHandler(cfg *config.Config) touka.HandlerFunc {
@@ -32,21 +58,16 @@ func NoRouteHandler(cfg *config.Config) touka.HandlerFunc {
}
// 制作url
rawPath = "https://" + matches[2]
var (
user string
repo string
matcher string
)
path := matches[2]
var matcherErr *GHProxyErrors
user, repo, matcher, matcherErr = Matcher(rawPath, cfg)
user, repo, matcher, matcherErr := Matcher("https://"+path, cfg)
if matcherErr != nil {
ErrorPage(c, matcherErr)
return
}
rawPath = buildHandlerPath(path, matcher)
shoudBreak = listCheck(cfg, c, user, repo, rawPath)
if shoudBreak {
return
@@ -57,11 +78,7 @@ func NoRouteHandler(cfg *config.Config) touka.HandlerFunc {
return
}
// 处理blob/raw路径
if matcher == "blob" {
rawPath = rawPath[18:]
rawPath = "https://raw.githubusercontent.com" + rawPath
rawPath = strings.Replace(rawPath, "/blob/", "/", 1)
matcher = "raw"
}

View File

@@ -2,15 +2,78 @@ package proxy
import (
"bufio"
"compress/gzip"
"bytes"
"fmt"
"ghproxy/config"
"io"
"strings"
"sync"
"github.com/infinite-iroha/touka"
)
// Pre-built byte-slice URL prefixes used by the []byte-based helpers
// (EditorMatcherBytes / modifyURLBytes). Declared once at package level so
// the hot replacement path does not convert these strings on every call.
var (
	prefixGithub   = []byte("https://github.com")
	prefixRawUser  = []byte("https://raw.githubusercontent.com")
	prefixRaw      = []byte("https://raw.github.com")
	prefixGistUser = []byte("https://gist.githubusercontent.com")
	prefixGist     = []byte("https://gist.github.com")
	prefixAPI      = []byte("https://api.github.com")
	prefixHTTP     = []byte("http://")
	prefixHTTPS    = []byte("https://")
)
// EditorMatcherBytes reports whether rawPath points at a GitHub resource
// whose embedded links should be rewritten. API URLs are only matched when
// cfg.Shell.RewriteAPI is enabled. The error result is always nil and exists
// to mirror the string-based EditorMatcher signature.
func EditorMatcherBytes(rawPath []byte, cfg *config.Config) (bool, error) {
	candidates := [][]byte{
		prefixGithub,
		prefixRawUser,
		prefixRaw,
		prefixGistUser,
		prefixGist,
	}
	for _, prefix := range candidates {
		if bytes.HasPrefix(rawPath, prefix) {
			return true, nil
		}
	}
	if cfg.Shell.RewriteAPI && bytes.HasPrefix(rawPath, prefixAPI) {
		return true, nil
	}
	return false, nil
}
// modifyURLBytes rewrites a matched GitHub URL so it routes through this
// proxy: scheme stripped, then re-emitted as "https://" + host + "/" + rest.
// URLs that EditorMatcherBytes rejects are returned unchanged.
func modifyURLBytes(url []byte, host []byte, cfg *config.Config) []byte {
	if matched, err := EditorMatcherBytes(url, cfg); err != nil || !matched {
		return url
	}
	// Drop a leading scheme, if any, keeping only the host+path tail.
	rest := url
	switch {
	case bytes.HasPrefix(url, prefixHTTPS):
		rest = url[len(prefixHTTPS):]
	case bytes.HasPrefix(url, prefixHTTP):
		rest = url[len(prefixHTTP):]
	}
	// Single exact-capacity allocation for the rewritten URL.
	out := make([]byte, 0, len(prefixHTTPS)+len(host)+1+len(rest))
	out = append(out, prefixHTTPS...)
	out = append(out, host...)
	out = append(out, '/')
	out = append(out, rest...)
	return out
}
func EditorMatcher(rawPath string, cfg *config.Config) (bool, error) {
// 匹配 "https://github.com"开头的链接
if strings.HasPrefix(rawPath, "https://github.com") {
@@ -65,116 +128,126 @@ func modifyURL(url string, host string, cfg *config.Config) string {
return url
}
// processLinks 处理链接,返回包含处理后数据的 io.Reader
func processLinks(input io.ReadCloser, compress string, host string, cfg *config.Config, c *touka.Context) (readerOut io.Reader, written int64, err error) {
pipeReader, pipeWriter := io.Pipe() // 创建 io.Pipe
// bufferPool recycles scratch buffers for processLinksBufferedInternal so a
// fresh bytes.Buffer is not allocated for every processed response body.
// Callers must Reset the buffer after Get and Put it back when done.
var bufferPool = sync.Pool{
	New: func() interface{} {
		return new(bytes.Buffer)
	},
}
// processLinksStreamingInternal is a link processing function that reads the input line by line.
// It is memory-safe for large files but less performant due to numerous small allocations.
func processLinksStreamingInternal(input io.ReadCloser, host string, cfg *config.Config, c *touka.Context) (readerOut io.Reader, written int64, err error) {
pipeReader, pipeWriter := io.Pipe()
readerOut = pipeReader
go func() { // 在 Goroutine 中执行写入操作
go func() {
defer func() {
if pipeWriter != nil { // 确保 pipeWriter 关闭,即使发生错误
if err != nil {
if closeErr := pipeWriter.CloseWithError(err); closeErr != nil { // 如果有错误,传递错误给 reader
c.Errorf("pipeWriter close with error failed: %v, original error: %v", closeErr, err)
}
} else {
if closeErr := pipeWriter.Close(); closeErr != nil { // 没有错误,正常关闭
c.Errorf("pipeWriter close failed: %v", closeErr)
if err == nil { // 如果之前没有错误,记录关闭错误
err = closeErr
}
}
}
if err != nil {
pipeWriter.CloseWithError(err)
} else {
pipeWriter.Close()
}
}()
defer input.Close()
defer func() {
if err := input.Close(); err != nil {
c.Errorf("input close failed: %v", err)
}
bufReader := bufio.NewReader(input)
bufWriter := bufio.NewWriterSize(pipeWriter, 4096)
defer bufWriter.Flush()
}()
var bufReader *bufio.Reader
if compress == "gzip" {
// 解压gzip
gzipReader, gzipErr := gzip.NewReader(input)
if gzipErr != nil {
err = fmt.Errorf("gzip解压错误: %v", gzipErr)
return // Goroutine 中使用 return 返回错误
}
defer gzipReader.Close()
bufReader = bufio.NewReader(gzipReader)
} else {
bufReader = bufio.NewReader(input)
}
var bufWriter *bufio.Writer
var gzipWriter *gzip.Writer
// 根据是否gzip确定 writer 的创建
if compress == "gzip" {
gzipWriter = gzip.NewWriter(pipeWriter) // 使用 pipeWriter
bufWriter = bufio.NewWriterSize(gzipWriter, 4096) //设置缓冲区大小
} else {
bufWriter = bufio.NewWriterSize(pipeWriter, 4096) // 使用 pipeWriter
}
//确保writer关闭
defer func() {
var closeErr error // 局部变量用于保存defer中可能发生的错误
if gzipWriter != nil {
if closeErr = gzipWriter.Close(); closeErr != nil {
c.Errorf("gzipWriter close failed %v", closeErr)
// 如果已经存在错误,则保留。否则,记录此错误。
if err == nil {
err = closeErr
}
}
}
if flushErr := bufWriter.Flush(); flushErr != nil {
c.Errorf("writer flush failed %v", flushErr)
// 如果已经存在错误,则保留。否则,记录此错误。
if err == nil {
err = flushErr
}
}
}()
// 使用正则表达式匹配 http 和 https 链接
for {
line, readErr := bufReader.ReadString('\n')
if readErr != nil {
if readErr == io.EOF {
break // 文件结束
}
err = fmt.Errorf("读取行错误: %v", readErr) // 传递错误
return // Goroutine 中使用 return 返回错误
if readErr != nil && readErr != io.EOF {
err = fmt.Errorf("read error: %w", readErr)
return
}
// 替换所有匹配的 URL
modifiedLine := urlPattern.ReplaceAllStringFunc(line, func(originalURL string) string {
return modifyURL(originalURL, host, cfg) // 假设 modifyURL 函数已定义
return modifyURL(originalURL, host, cfg)
})
n, writeErr := bufWriter.WriteString(modifiedLine)
written += int64(n) // 更新写入的字节数
if writeErr != nil {
err = fmt.Errorf("写入文件错误: %v", writeErr) // 传递错误
return // Goroutine 中使用 return 返回错误
var n int
n, err = bufWriter.WriteString(modifiedLine)
written += int64(n)
if err != nil {
err = fmt.Errorf("write error: %w", err)
return
}
}
// 在返回之前,再刷新一次 (虽然 defer 中已经有 flush但这里再加一次确保及时刷新)
if flushErr := bufWriter.Flush(); flushErr != nil {
if err == nil { // 避免覆盖之前的错误
err = flushErr
if readErr == io.EOF {
break
}
return // Goroutine 中使用 return 返回错误
}
}()
return readerOut, written, nil // 返回 reader 和 writtenerror 由 Goroutine 通过 pipeWriter.CloseWithError 传递
return readerOut, written, nil
}
// processLinks acts as a dispatcher, choosing the best processing strategy based on file size.
// It uses a memory-safe streaming approach for large or unknown-size files,
// and a high-performance buffered approach for smaller files.
// bodySize is the upstream Content-Length; any negative value means unknown.
func processLinks(input io.ReadCloser, host string, cfg *config.Config, c *touka.Context, bodySize int) (readerOut io.Reader, written int64, err error) {
	const sizeThreshold = 256 * 1024 // 256KB
	// Treat every negative size (not just the -1 sentinel) as unknown, and
	// stream large/unknown bodies so the whole payload is never held in memory.
	if bodySize < 0 || bodySize > sizeThreshold {
		c.Debugf("Using streaming processor for large/unknown size file (%d bytes)", bodySize)
		return processLinksStreamingInternal(input, host, cfg, c)
	}
	c.Debugf("Using buffered processor for small file (%d bytes)", bodySize)
	return processLinksBufferedInternal(input, host, cfg, c)
}
// processLinksBufferedInternal is a link processing function that reads the
// entire content into a pooled buffer. It is optimized for performance on
// smaller files but carries an OOM risk for large files, so the dispatcher
// only routes bodies below its size threshold here.
//
// The returned written count is always 0: processing happens asynchronously
// in a goroutine, so the byte count is unknown when this function returns.
// Errors are delivered to the caller through readerOut via CloseWithError.
func processLinksBufferedInternal(input io.ReadCloser, host string, cfg *config.Config, c *touka.Context) (readerOut io.Reader, written int64, err error) {
	pipeReader, pipeWriter := io.Pipe()
	hostBytes := []byte(host)
	go func() {
		// Goroutine-local error: assigning the outer named return here after
		// the function has already returned would be a data race.
		var gerr error
		defer func() {
			if closeErr := input.Close(); closeErr != nil {
				c.Errorf("input close failed: %v", closeErr)
			}
		}()
		// Close the pipe with gerr (if any) so the reader side observes it.
		defer func() {
			if gerr != nil {
				if closeErr := pipeWriter.CloseWithError(gerr); closeErr != nil {
					c.Errorf("pipeWriter close with error failed: %v", closeErr)
				}
			} else if closeErr := pipeWriter.Close(); closeErr != nil {
				c.Errorf("pipeWriter close failed: %v", closeErr)
			}
		}()
		buf := bufferPool.Get().(*bytes.Buffer)
		buf.Reset()
		defer bufferPool.Put(buf)
		// Slurp the whole input into the reused buffer.
		if _, gerr = buf.ReadFrom(input); gerr != nil {
			gerr = fmt.Errorf("reading input failed: %w", gerr)
			return
		}
		// Rewrite every matched URL using the []byte helpers (near zero-alloc).
		modified := urlPattern.ReplaceAllFunc(buf.Bytes(), func(originalURL []byte) []byte {
			return modifyURLBytes(originalURL, hostBytes, cfg)
		})
		// Pipe writes block until the reader has consumed the data, so buf is
		// safe to return to the pool once Write completes.
		if _, gerr = pipeWriter.Write(modified); gerr != nil {
			gerr = fmt.Errorf("writing to pipe failed: %w", gerr)
		}
	}()
	return pipeReader, 0, nil
}

65
proxy/nest_bench_test.go Normal file
View File

@@ -0,0 +1,65 @@
package proxy
import (
"ghproxy/config"
"io"
"strings"
"testing"
)
// benchmarkInput is a small mixed corpus for both processLinks benchmarks:
// one rewritable GitHub link, one non-GitHub link, one other http link, and
// plain text that must pass through untouched.
const benchmarkInput = `
Some text here.
Link to be replaced: http://github.com/user/repo
Another link: https://google.com
And one more: http://example.com/some/path
This should not be replaced: notalink
End of text.
`
// BenchmarkProcessLinksStreaming measures the line-by-line streaming
// processor end-to-end: process benchmarkInput and drain the resulting
// reader. Input construction stays inside the timed loop: calling
// StopTimer/StartTimer on every iteration adds more measurement noise and
// wall-clock overhead than the cheap strings.NewReader it would exclude.
func BenchmarkProcessLinksStreaming(b *testing.B) {
	cfg := &config.Config{}
	host := "my-proxy.com"
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		input := io.NopCloser(strings.NewReader(benchmarkInput))
		reader, _, err := processLinksStreamingInternal(input, host, cfg, nil)
		if err != nil {
			b.Fatalf("processLinksStreamingInternal failed: %v", err)
		}
		if _, err = io.ReadAll(reader); err != nil {
			b.Fatalf("Failed to read from processed reader: %v", err)
		}
	}
}
// BenchmarkProcessLinksBuffered measures the whole-buffer processor
// end-to-end: process benchmarkInput and drain the resulting reader.
// Input construction stays inside the timed loop: calling
// StopTimer/StartTimer on every iteration adds more measurement noise and
// wall-clock overhead than the cheap strings.NewReader it would exclude.
func BenchmarkProcessLinksBuffered(b *testing.B) {
	cfg := &config.Config{}
	host := "my-proxy.com"
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		input := io.NopCloser(strings.NewReader(benchmarkInput))
		reader, _, err := processLinksBufferedInternal(input, host, cfg, nil)
		if err != nil {
			b.Fatalf("processLinksBufferedInternal failed: %v", err)
		}
		if _, err = io.ReadAll(reader); err != nil {
			b.Fatalf("Failed to read from processed reader: %v", err)
		}
	}
}

View File

@@ -27,6 +27,7 @@ var (
"CDN-Loop": {},
"Upgrade": {},
"Connection": {},
"Accept-Encoding": {},
}
cloneHeadersToRemove = map[string]struct{}{
@@ -43,7 +44,7 @@ var (
var (
defaultHeaders = map[string]string{
"Accept": "*/*",
"Accept-Encoding": "gzip",
"Accept-Encoding": "",
"Transfer-Encoding": "chunked",
"User-Agent": "GHProxy/1.0",
}

View File

@@ -7,6 +7,33 @@ import (
"github.com/infinite-iroha/touka"
)
// buildRoutingPath assembles the final upstream URL with a single
// strings.Builder allocation instead of repeated string concatenation.
// rawPath arrives without a scheme; for matcher "blob" on a github.com path
// the host is rewritten to raw.githubusercontent.com and the first "/blob/"
// segment removed, otherwise rawPath is emitted after "https://" unchanged.
func buildRoutingPath(rawPath, matcher string) string {
	const ghHost = "github.com"
	var sb strings.Builder
	// Pre-grow to cover the rewritten host plus the original path.
	sb.Grow(len(rawPath) + 30)
	sb.WriteString("https://")
	// Only rewrite when the path really is under "github.com/"; a blob
	// matcher on any other (or malformed) path falls through untouched.
	if matcher == "blob" && strings.HasPrefix(rawPath, ghHost+"/") {
		sb.WriteString("raw.githubusercontent.com")
		rest := rawPath[len(ghHost):] // keeps the leading "/"
		if i := strings.Index(rest, "/blob/"); i != -1 {
			// Drop only the first "/blob/" segment, mirroring the old
			// strings.Replace(..., 1) behavior.
			sb.WriteString(rest[:i])
			sb.WriteString("/")
			sb.WriteString(rest[i+len("/blob/"):])
		} else {
			sb.WriteString(rest)
		}
	} else {
		sb.WriteString(rawPath)
	}
	return sb.String()
}
func RoutingHandler(cfg *config.Config) touka.HandlerFunc {
return func(c *touka.Context) {
@@ -44,17 +71,11 @@ func RoutingHandler(cfg *config.Config) touka.HandlerFunc {
return
}
// 处理blob/raw路径
rawPath = buildRoutingPath(rawPath, matcher)
if matcher == "blob" {
rawPath = rawPath[10:]
rawPath = "raw.githubusercontent.com" + rawPath
rawPath = strings.Replace(rawPath, "/blob/", "/", 1)
matcher = "raw"
}
// 为rawpath加入https:// 头
rawPath = "https://" + rawPath
switch matcher {
case "releases", "blob", "raw", "gist", "api":
ChunkedProxyRequest(ctx, c, rawPath, cfg, matcher)