mirror of https://github.com/WJQSERVER-STUDIO/ghproxy.git (synced 2026-02-03 08:11:11 +08:00)

25w20b

commit a0cfe826ea, parent 2e974ad7ae

10 changed files with 55 additions and 342 deletions
@@ -1,5 +1,9 @@
 # Changelog
 
+25w20b - 2025-03-19
+---
+- PRE-RELEASE: This is a pre-release of v3.0.0; do not use it in production. v3.0.0 is guaranteed to stay compatible with v2.4.0 and later, so it can be upgraded in place.
+
 25w20a - 2025-03-18
 ---
 - PRE-RELEASE: This is a pre-release of v3.0.0; do not use it in production. v3.0.0 is guaranteed to stay compatible with v2.4.0 and later, so it can be upgraded in place.
@@ -1 +1 @@
-25w20a
+25w20b
@@ -11,7 +11,7 @@ import (
 
 var (
 	logw = logger.Logw
-	LogDump = logger.LogDump
+	logDump = logger.LogDump
 	logDebug = logger.LogDebug
 	logInfo = logger.LogInfo
 	logWarning = logger.LogWarning
main.go (20 changed lines)
@@ -301,46 +301,46 @@ func main() {
 
 	// 1. GitHub Releases/Archive - Use distinct path segments for type
 	r.GET("/github.com/:username/:repo/releases/*filepath", func(ctx context.Context, c *app.RequestContext) { // Distinct path for releases
-		proxy.NoRouteHandler(cfg, limiter, iplimiter, runMode)(ctx, c)
+		proxy.NoRouteHandler(cfg, limiter, iplimiter)(ctx, c)
 	})
 
 	r.GET("/github.com/:username/:repo/archive/*filepath", func(ctx context.Context, c *app.RequestContext) { // Distinct path for archive
-		proxy.NoRouteHandler(cfg, limiter, iplimiter, runMode)(ctx, c)
+		proxy.NoRouteHandler(cfg, limiter, iplimiter)(ctx, c)
 	})
 
 	// 2. GitHub Blob/Raw - Use distinct path segments for type
 	r.GET("/github.com/:username/:repo/blob/*filepath", func(ctx context.Context, c *app.RequestContext) { // Distinct path for blob
-		proxy.NoRouteHandler(cfg, limiter, iplimiter, runMode)(ctx, c)
+		proxy.NoRouteHandler(cfg, limiter, iplimiter)(ctx, c)
 	})
 
 	r.GET("/github.com/:username/:repo/raw/*filepath", func(ctx context.Context, c *app.RequestContext) { // Distinct path for raw
-		proxy.NoRouteHandler(cfg, limiter, iplimiter, runMode)(ctx, c)
+		proxy.NoRouteHandler(cfg, limiter, iplimiter)(ctx, c)
 	})
 
 	r.GET("/github.com/:username/:repo/info/*filepath", func(ctx context.Context, c *app.RequestContext) { // Distinct path for info
-		proxy.NoRouteHandler(cfg, limiter, iplimiter, runMode)(ctx, c)
+		proxy.NoRouteHandler(cfg, limiter, iplimiter)(ctx, c)
 	})
 	r.GET("/github.com/:username/:repo/git-upload-pack", func(ctx context.Context, c *app.RequestContext) {
-		proxy.NoRouteHandler(cfg, limiter, iplimiter, runMode)(ctx, c)
+		proxy.NoRouteHandler(cfg, limiter, iplimiter)(ctx, c)
 	})
 
 	// 4. Raw GitHubusercontent - Keep as is (assuming it's distinct enough)
 	r.GET("/raw.githubusercontent.com/:username/:repo/*filepath", func(ctx context.Context, c *app.RequestContext) {
-		proxy.NoRouteHandler(cfg, limiter, iplimiter, runMode)(ctx, c)
+		proxy.NoRouteHandler(cfg, limiter, iplimiter)(ctx, c)
 	})
 
 	// 5. Gist GitHubusercontent - Keep as is (assuming it's distinct enough)
 	r.GET("/gist.githubusercontent.com/:username/*filepath", func(ctx context.Context, c *app.RequestContext) {
-		proxy.NoRouteHandler(cfg, limiter, iplimiter, runMode)(ctx, c)
+		proxy.NoRouteHandler(cfg, limiter, iplimiter)(ctx, c)
 	})
 
 	// 6. GitHub API Repos - Keep as is (assuming it's distinct enough)
 	r.GET("/api.github.com/repos/:username/:repo/*filepath", func(ctx context.Context, c *app.RequestContext) {
-		proxy.NoRouteHandler(cfg, limiter, iplimiter, runMode)(ctx, c)
+		proxy.NoRouteHandler(cfg, limiter, iplimiter)(ctx, c)
 	})
 
 	r.NoRoute(func(ctx context.Context, c *app.RequestContext) {
-		proxy.NoRouteHandler(cfg, limiter, iplimiter, runMode)(ctx, c)
+		proxy.NoRouteHandler(cfg, limiter, iplimiter)(ctx, c)
 	})
 
 	fmt.Printf("GHProxy Version: %s\n", version)
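Note: every route above now shares the three-argument proxy.NoRouteHandler; the runMode parameter is dropped from the whole chain. A minimal, dependency-free sketch of this handler-factory wiring follows; the types and names below are illustrative stand-ins, not the project's real ones.

package main

import (
	"context"
	"fmt"
)

// handlerFunc stands in for Hertz's app.HandlerFunc.
type handlerFunc func(ctx context.Context, path string)

// Placeholder dependencies; only the wiring pattern matters here.
type config struct{ SizeLimit int }
type rateLimiter struct{}
type ipRateLimiter struct{}

// noRouteHandler mirrors the shape of proxy.NoRouteHandler after this commit:
// it closes over its three dependencies (no runMode) and returns a single
// handler that every route registration can reuse.
func noRouteHandler(cfg *config, l *rateLimiter, ipl *ipRateLimiter) handlerFunc {
	return func(ctx context.Context, path string) {
		fmt.Printf("proxying %s (size limit %d MB)\n", path, cfg.SizeLimit)
	}
}

func main() {
	h := noRouteHandler(&config{SizeLimit: 128}, &rateLimiter{}, &ipRateLimiter{})
	for _, p := range []string{
		"/github.com/user/repo/releases/download/v1.0/app.tar.gz",
		"/raw.githubusercontent.com/user/repo/main/README.md",
	} {
		h(context.Background(), p)
	}
}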
@@ -19,7 +19,6 @@ func AuthPassThrough(c *app.RequestContext, cfg *config.Config, req *http.Reques
 	} else {
 		logWarning("%s %s %s %s %s Auth-Error: Conflict Auth Method", c.ClientIP(), c.Request.Method, string(c.Path()), c.UserAgent(), c.Request.Header.GetProtocol())
 		// 500 Internal Server Error
-		//c.JSON(http.StatusInternalServerError, gin.H{"error": "Conflict Auth Method"})
 		c.JSON(http.StatusInternalServerError, map[string]string{"error": "Conflict Auth Method"})
 		return
 	}
@@ -30,7 +29,6 @@ func AuthPassThrough(c *app.RequestContext, cfg *config.Config, req *http.Reques
 	default:
 		logWarning("%s %s %s %s %s Invalid Auth Method / Auth Method is not be set", c.ClientIP(), c.Request.Method, string(c.Path()), c.UserAgent(), c.Request.Header.GetProtocol())
 		// 500 Internal Server Error
-		//c.JSON(http.StatusInternalServerError, gin.H{"error": "Invalid Auth Method / Auth Method is not be set"})
 		c.JSON(http.StatusInternalServerError, map[string]string{"error": "Invalid Auth Method / Auth Method is not be set"})
 		return
 	}
@@ -9,10 +9,8 @@ import (
 	"net/http"
 	"strconv"
 
-	"github.com/cloudwego/hertz/pkg/app"
-	//hclient "github.com/cloudwego/hertz/pkg/app/client"
-	//"github.com/cloudwego/hertz/pkg/protocol"
 	"github.com/WJQSERVER-STUDIO/go-utils/hwriter"
+	"github.com/cloudwego/hertz/pkg/app"
 	hresp "github.com/cloudwego/hertz/pkg/protocol/http1/resp"
 )
 
@@ -87,7 +85,7 @@ func ChunkedProxyRequest(ctx context.Context, c *app.RequestContext, u string, c
 		if err == nil && size > sizelimit {
 			finalURL := resp.Request.URL.String()
 			c.Redirect(http.StatusMovedPermanently, []byte(finalURL))
-			logWarning("%s %s %s %s %s Final-URL: %s Size-Limit-Exceeded: %d", c.ClientIP(), c.Request.Method, c.Path(), c.Request.Header.Get("User-Agent"), c.Request.Header.GetProtocol(), finalURL, size)
+			logWarning("%s %s %s %s %s Final-URL: %s Size-Limit-Exceeded: %d", c.ClientIP(), c.Request.Method, c.Path(), c.UserAgent(), c.Request.Header.GetProtocol(), finalURL, size)
 			return
 		}
 	}
@@ -132,18 +130,7 @@ func ChunkedProxyRequest(ctx context.Context, c *app.RequestContext, u string, c
 		logInfo("Is Shell: %s %s %s %s %s", c.ClientIP(), method, u, c.Request.Header.Get("User-Agent"), c.Request.Header.GetProtocol())
 		c.Header("Content-Length", "")
 
-		ProcessLinksAndWriteChunked(resp.Body, compress, string(c.Request.Host()), cfg, c)
-
-		/*
-			presp, err := processLinks(resp.Body, compress, string(c.Request.Host()), cfg)
-			if err != nil {
-				logError("Failed to process links: %v", err)
-				WriteChunkedBody(resp.Body, c)
-				return
-			}
-			defer presp.Close()
-			WriteChunkedBody(presp, c)
-		*/
+		err := ProcessLinksAndWriteChunked(resp.Body, compress, string(c.Request.Host()), cfg, c)
 
 		if err != nil {
 			logError("%s %s %s %s %s Failed to copy response body: %v", c.ClientIP(), method, u, c.Request.Header.Get("User-Agent"), c.Request.Header.GetProtocol(), err)
@@ -152,7 +139,6 @@ func ChunkedProxyRequest(ctx context.Context, c *app.RequestContext, u string, c
 			c.Flush() // make sure the data is flushed
 		}
 	} else {
-		//WriteChunkedBody(resp.Body, c)
 		err = hwriter.Writer(resp.Body, c)
 		if err != nil {
 			logError("%s %s %s %s %s Failed to copy response body: %v", c.ClientIP(), method, u, c.Request.Header.Get("User-Agent"), c.Request.Header.GetProtocol(), err)
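Note: both branches of ChunkedProxyRequest now report copy failures the same way, because ProcessLinksAndWriteChunked returns an error instead of swallowing it. A small, self-contained sketch of that calling pattern; the helper and the rewritten host below are stand-ins, not the project's code.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// copyBody mimics the two branches above: a link-rewriting path and a plain
// streaming path, both surfacing errors to one caller-side check.
func copyBody(rewrite bool, body io.Reader, w io.Writer) error {
	if rewrite {
		// stand-in for ProcessLinksAndWriteChunked: transform, then write
		data, err := io.ReadAll(body)
		if err != nil {
			return fmt.Errorf("read body: %w", err)
		}
		_, err = w.Write(bytes.ReplaceAll(data, []byte("github.com"), []byte("proxy.example.com/github.com")))
		return err
	}
	_, err := io.Copy(w, body) // stand-in for hwriter.Writer
	return err
}

func main() {
	var out bytes.Buffer
	if err := copyBody(true, strings.NewReader("curl https://github.com/u/r"), &out); err != nil {
		fmt.Println("failed to copy response body:", err)
		return
	}
	fmt.Println(out.String())
}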
@@ -7,15 +7,13 @@ import (
 	"ghproxy/config"
 	"io"
 	"net/http"
-	"net/url"
 	"strconv"
-	"strings"
 
 	"github.com/WJQSERVER-STUDIO/go-utils/hwriter"
 	"github.com/cloudwego/hertz/pkg/app"
 )
 
-func GitReq(ctx context.Context, c *app.RequestContext, u string, cfg *config.Config, mode string, runMode string) {
+func GitReq(ctx context.Context, c *app.RequestContext, u string, cfg *config.Config, mode string) {
 	method := string(c.Request.Method())
 
 	logDump("Url Before FMT:%s", u)
@@ -116,21 +114,6 @@ func GitReq(ctx context.Context, c *app.RequestContext, u string, cfg *config.Co
 	}
 
 	c.Status(resp.StatusCode)
-	/*
-		// use a fixed 32KB buffer pool
-		buffer := BufferPool.Get().([]byte)
-		defer BufferPool.Put(buffer)
-
-		_, err = io.CopyBuffer(c.Writer, resp.Body, buffer)
-		if err != nil {
-			logError("%s %s %s %s %s Failed to copy response body: %v", c.ClientIP(), method, u, c.Request.Header.Get("User-Agent"), c.Request.Header.GetProtocol(), err)
-			return
-		} else {
-			c.Writer.Flush() // make sure the data is flushed
-		}
-	*/
-
-	//_, err = copyb.CopyBuffer(c, resp.Body, nil)
 	err = hwriter.Writer(resp.Body, c)
 
 	if err != nil {
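Note: the commented-out block deleted above copied the body through a pooled 32KB buffer with io.CopyBuffer; hwriter.Writer now handles the streaming. For reference, a minimal, self-contained sketch of that pooled-buffer approach (names are illustrative, not the project's):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
	"sync"
)

// bufferPool hands out reusable 32KB buffers so each copy avoids a fresh allocation.
// (Production code often pools *[]byte to avoid re-boxing the slice header on Put.)
var bufferPool = sync.Pool{
	New: func() any { return make([]byte, 32*1024) },
}

// copyWithPool streams src to dst through a pooled buffer, the way the
// removed io.CopyBuffer block did.
func copyWithPool(dst io.Writer, src io.Reader) (int64, error) {
	buf := bufferPool.Get().([]byte)
	defer bufferPool.Put(buf)
	return io.CopyBuffer(dst, src, buf)
}

func main() {
	var out bytes.Buffer
	n, err := copyWithPool(&out, strings.NewReader("0000pack data..."))
	fmt.Println(n, err, out.String())
}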
@@ -142,35 +125,3 @@ func GitReq(ctx context.Context, c *app.RequestContext, u string, cfg *config.Co
 	}
 
 }
-
-// extractParts extracts the required parts from the given URL
-func extractParts(rawURL string) (string, string, string, url.Values, error) {
-	// parse the URL
-	parsedURL, err := url.Parse(rawURL)
-	if err != nil {
-		return "", "", "", nil, err
-	}
-
-	// take the path portion and split it
-	pathParts := strings.Split(parsedURL.Path, "/")
-
-	// extract the required parts
-	if len(pathParts) < 3 {
-		return "", "", "", nil, fmt.Errorf("URL path is too short")
-	}
-
-	// extract e.g. /WJQSERVER-STUDIO and /go-utils.git
-	repoOwner := "/" + pathParts[1]
-	repoName := "/" + pathParts[2]
-
-	// remaining path
-	remainingPath := strings.Join(pathParts[3:], "/")
-	if remainingPath != "" {
-		remainingPath = "/" + remainingPath
-	}
-
-	// query parameters
-	queryParams := parsedURL.Query()
-
-	return repoOwner, repoName, remainingPath, queryParams, nil
-}
@@ -16,7 +16,7 @@ import (
 
 var re = regexp.MustCompile(`^(http:|https:)?/?/?(.*)`) // matches paths that may start with http:// or https://
 
-func NoRouteHandler(cfg *config.Config, limiter *rate.RateLimiter, iplimiter *rate.IPRateLimiter, runMode string) app.HandlerFunc {
+func NoRouteHandler(cfg *config.Config, limiter *rate.RateLimiter, iplimiter *rate.IPRateLimiter) app.HandlerFunc {
 	return func(ctx context.Context, c *app.RequestContext) {
 
 		// rate-limit incoming requests
@@ -35,14 +35,12 @@ func NoRouteHandler(cfg *config.Config, limiter *rate.RateLimiter, iplimiter *ra
 			}
 
 			if !allowed {
-				//c.JSON(http.StatusTooManyRequests, gin.H{"error": "Too Many Requests"})
 				c.JSON(http.StatusTooManyRequests, map[string]string{"error": "Too Many Requests"})
 				logWarning("%s %s %s %s %s 429-TooManyRequests", c.ClientIP(), c.Request.Method, c.Request.RequestURI(), c.Request.Header.UserAgent(), c.Request.Header.GetProtocol())
 				return
 			}
 		}
 
-		//rawPath := strings.TrimPrefix(c.Request.URL.Path, "/") // strip the leading "/"
 		rawPath := strings.TrimPrefix(string(c.Request.RequestURI()), "/") // strip the leading "/"
 		matches := re.FindStringSubmatch(rawPath) // match the path
 		logInfo("Matches: %v", matches)
@@ -75,18 +73,16 @@ func NoRouteHandler(cfg *config.Config, limiter *rate.RateLimiter, iplimiter *ra
 
 		logInfo("%s %s %s %s %s Matched-Username: %s, Matched-Repo: %s", c.ClientIP(), c.Request.Method, rawPath, c.Request.Header.UserAgent(), c.Request.Header.GetProtocol(), username, repo)
 		// dump log with detailed info: c.ClientIP(), c.Request.Method, rawPath, c.Request.Header.UserAgent(), c.Request.Header.GetProtocol(), full Header
-		logDump("%s %s %s %s %s %s", c.ClientIP(), c.Request.Method, rawPath, c.Request.Header.UserAgent(), c.Request.Header.GetProtocol(), c.Request.Header)
+		logDump("%s %s %s %s %s %s", c.ClientIP(), c.Request.Method, rawPath, c.Request.Header.UserAgent(), c.Request.Header.GetProtocol(), c.Request.Header.Header())
 		repouser := fmt.Sprintf("%s/%s", username, repo)
 
 		// whitelist check
 		if cfg.Whitelist.Enabled {
 			whitelist := auth.CheckWhitelist(username, repo)
 			if !whitelist {
-				logErrMsg := fmt.Sprintf("%s %s %s %s %s Whitelist Blocked repo: %s", c.ClientIP(), c.Request.Method, rawPath, c.Request.Header.UserAgent(), c.Request.Header.GetProtocol(), repouser)
 				errMsg := fmt.Sprintf("Whitelist Blocked repo: %s", repouser)
-				//c.JSON(http.StatusForbidden, gin.H{"error": errMsg})
 				c.JSON(http.StatusForbidden, map[string]string{"error": errMsg})
-				logWarning(logErrMsg)
+				logWarning("%s %s %s %s %s Whitelist Blocked repo: %s", c.ClientIP(), c.Request.Method, rawPath, c.Request.Header.UserAgent(), c.Request.Header.GetProtocol(), repouser)
 				return
 			}
 		}
@@ -95,24 +91,13 @@ func NoRouteHandler(cfg *config.Config, limiter *rate.RateLimiter, iplimiter *ra
 		if cfg.Blacklist.Enabled {
 			blacklist := auth.CheckBlacklist(username, repo)
 			if blacklist {
-				logErrMsg := fmt.Sprintf("%s %s %s %s %s Blacklist Blocked repo: %s", c.ClientIP(), c.Request.Method, rawPath, c.Request.Header.UserAgent(), c.Request.Header.GetProtocol(), repouser)
 				errMsg := fmt.Sprintf("Blacklist Blocked repo: %s", repouser)
-				//c.JSON(http.StatusForbidden, gin.H{"error": errMsg})
 				c.JSON(http.StatusForbidden, map[string]string{"error": errMsg})
-				logWarning(logErrMsg)
+				logWarning("%s %s %s %s %s Blacklist Blocked repo: %s", c.ClientIP(), c.Request.Method, rawPath, c.Request.Header.UserAgent(), c.Request.Header.GetProtocol(), repouser)
 				return
 			}
 		}
 
-		/*
-			matches = CheckURL(rawPath, c)
-			if matches == nil {
-				c.AbortWithStatus(http.StatusNotFound)
-				logWarning("%s %s %s %s %s 404-NOMATCH", c.ClientIP(), c.Request.Method, rawPath, c.Request.Header.UserAgent(), c.Request.Header.GetProtocol())
-				return
-			}
-		*/
-
 		// if the path matches api.github.com/repos/<username>/<repo>/<path>, check whether HeaderAuth is enabled
 
 		// handle blob/raw paths
@@ -137,8 +122,7 @@ func NoRouteHandler(cfg *config.Config, limiter *rate.RateLimiter, iplimiter *ra
 		case "releases", "blob", "raw", "gist", "api":
 			ChunkedProxyRequest(ctx, c, rawPath, cfg, matcher)
 		case "clone":
-			//ProxyRequest(c, rawPath, cfg, "git", runMode)
-			GitReq(ctx, c, rawPath, cfg, "git", runMode)
+			GitReq(ctx, c, rawPath, cfg, "git")
 		default:
 			c.String(http.StatusForbidden, "Invalid input.")
 			fmt.Println("Invalid input.")
@@ -146,16 +130,3 @@ func NoRouteHandler(cfg *config.Config, limiter *rate.RateLimiter, iplimiter *ra
 		}
 	}
 }
-
-/*
-func CheckURL(u string, c *gin.Context) []string {
-	for _, exp := range exps {
-		if matches := exp.FindStringSubmatch(u); matches != nil {
-			return matches[1:]
-		}
-	}
-	errMsg := fmt.Sprintf("%s %s %s %s %s Invalid URL", c.ClientIP(), c.Request.Method, u, c.Request.Header.UserAgent(), c.Request.Header.GetProtocol())
-	logError(errMsg)
-	return nil
-}
-*/
@@ -2,12 +2,12 @@ package proxy
 
 import (
 	"bufio"
-	"bytes"
 	"compress/gzip"
 	"fmt"
 	"ghproxy/config"
 	"io"
 	"net/http"
+	"net/url"
 	"regexp"
 	"strings"
 
@@ -211,128 +211,8 @@ func matchString(target string, stringsToMatch []string) bool {
 	return exists
 }
 
-// processLinks rewrites links and returns an io.ReadCloser
-func processLinks(input io.Reader, compress string, host string, cfg *config.Config) (io.ReadCloser, error) {
-	var reader *bufio.Reader
-
-	if compress == "gzip" {
-		// decompress gzip
-		gzipReader, err := gzip.NewReader(input)
-		if err != nil {
-			return nil, fmt.Errorf("gzip 解压错误: %w", err)
-		}
-		reader = bufio.NewReader(gzipReader)
-	} else {
-		reader = bufio.NewReader(input)
-	}
-
-	// create a buffer to hold the output
-	var outputBuffer io.Writer
-	var gzipWriter *gzip.Writer
-	var output io.ReadCloser
-	var buf bytes.Buffer
-
-	if compress == "gzip" {
-		// create a pipe connecting gzipWriter and output
-		pipeReader, pipeWriter := io.Pipe() // create a pipe
-		output = pipeReader // the read end of the pipe becomes the output
-		outputBuffer = pipeWriter // the write end of the pipe becomes outputBuffer
-		gzipWriter = gzip.NewWriter(outputBuffer)
-		go func() {
-			defer pipeWriter.Close() // make sure pipeWriter is closed when the goroutine ends
-			writer := bufio.NewWriter(gzipWriter)
-			defer func() {
-				if err := writer.Flush(); err != nil {
-					logError("gzip writer 刷新失败: %v", err)
-				}
-				if err := gzipWriter.Close(); err != nil {
-					logError("gzipWriter 关闭失败: %v", err)
-				}
-			}()
-
-			scanner := bufio.NewScanner(reader)
-			urlPattern := regexp.MustCompile(`https?://[^\s'"]+`)
-			for scanner.Scan() {
-				line := scanner.Text()
-				modifiedLine := urlPattern.ReplaceAllStringFunc(line, func(originalURL string) string {
-					return modifyURL(originalURL, host, cfg)
-				})
-				if _, err := writer.WriteString(modifiedLine + "\n"); err != nil {
-					logError("写入 gzipWriter 失败: %v", err)
-					return // exit the goroutine on error
-				}
-			}
-			if err := scanner.Err(); err != nil {
-				logError("读取输入错误: %v", err)
-			}
-		}()
-	} else {
-		outputBuffer = &buf
-		writer := bufio.NewWriter(outputBuffer)
-		defer func() {
-			if err := writer.Flush(); err != nil {
-				logError("writer 刷新失败: %v", err)
-			}
-		}()
-
-		urlPattern := regexp.MustCompile(`https?://[^\s'"]+`)
-		scanner := bufio.NewScanner(reader)
-		for scanner.Scan() {
-			line := scanner.Text()
-			modifiedLine := urlPattern.ReplaceAllStringFunc(line, func(originalURL string) string {
-				return modifyURL(originalURL, host, cfg)
-			})
-			if _, err := writer.WriteString(modifiedLine + "\n"); err != nil {
-				return nil, fmt.Errorf("写入文件错误: %w", err) // propagate the error
-			}
-		}
-		if err := scanner.Err(); err != nil {
-			return nil, fmt.Errorf("读取行错误: %w", err) // propagate the error
-		}
-		output = io.NopCloser(&buf)
-	}
-
-	return output, nil
-}
-
-func WriteChunkedBody(resp io.ReadCloser, c *app.RequestContext) {
-	defer resp.Close()
-
-	c.Response.HijackWriter(hresp.NewChunkedBodyWriter(&c.Response, c.GetWriter()))
-
-	bufWrapper := bytebufferpool.Get()
-	buf := bufWrapper.B
-	size := 32768 // 32KB
-	buf = buf[:cap(buf)]
-	if len(buf) < size {
-		buf = append(buf, make([]byte, size-len(buf))...)
-	}
-	buf = buf[:size] // limit the buffer to 'size'
-	defer bytebufferpool.Put(bufWrapper)
-
-	for {
-		n, err := resp.Read(buf)
-		if err != nil {
-			if err == io.EOF {
-				break // reached the end of the body
-			}
-			fmt.Println("读取错误:", err)
-			c.String(http.StatusInternalServerError, "读取错误")
-			return
-		}
-
-		_, err = c.Write(buf[:n]) // write a chunk
-		if err != nil {
-			fmt.Println("写入 chunk 错误:", err)
-			return
-		}
-
-		c.Flush() // flush the chunk to the client
-	}
-}
-
 // processLinksAndWriteChunked rewrites links and writes the result to the response in chunked mode
-func ProcessLinksAndWriteChunked(input io.Reader, compress string, host string, cfg *config.Config, c *app.RequestContext) {
+func ProcessLinksAndWriteChunked(input io.Reader, compress string, host string, cfg *config.Config, c *app.RequestContext) error {
 	var reader *bufio.Reader
 
 	if compress == "gzip" {
@@ -340,7 +220,7 @@ func ProcessLinksAndWriteChunked(input io.Reader, compress string, host string,
 		gzipReader, err := gzip.NewReader(input)
 		if err != nil {
 			c.String(http.StatusInternalServerError, fmt.Sprintf("gzip 解压错误: %v", err))
-			return
+			return fmt.Errorf("gzip 解压错误: %w", err)
 		}
 		defer gzipReader.Close()
 		reader = bufio.NewReader(gzipReader)
@@ -386,13 +266,13 @@ func ProcessLinksAndWriteChunked(input io.Reader, compress string, host string,
 			_, err := writer.Write([]byte(modifiedLineWithNewline))
 			if err != nil {
 				logError("写入 chunk 错误: %v", err)
-				return // exit on error
+				return fmt.Errorf("写入 chunk 错误: %w", err)
 			}
 
 			if compress != "gzip" {
 				if fErr := chunkedWriter.Flush(); fErr != nil {
 					logError("chunkedWriter flush failed: %v", fErr)
-					return
+					return fmt.Errorf("chunkedWriter flush failed: %w", fErr)
 				}
 			}
 		}
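Note: the bare returns above become wrapped errors, so the function can keep logging locally while still handing the cause back to its caller. A small self-contained illustration of the fmt.Errorf %w / errors.Is pattern; the message text and sentinel error below are only examples.

package main

import (
	"errors"
	"fmt"
	"io"
)

// wrapWrite mirrors the change above: log-and-return becomes log-and-wrap,
// so callers can still test for the underlying cause.
func wrapWrite(err error) error {
	if err != nil {
		return fmt.Errorf("write chunk: %w", err)
	}
	return nil
}

func main() {
	err := wrapWrite(io.ErrClosedPipe)
	fmt.Println(err)                              // write chunk: io: read/write on closed pipe
	fmt.Println(errors.Is(err, io.ErrClosedPipe)) // true
}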
@@ -400,125 +280,48 @@ func ProcessLinksAndWriteChunked(input io.Reader, compress string, host string,
 	if err := scanner.Err(); err != nil {
 		logError("读取输入错误: %v", err)
 		c.String(http.StatusInternalServerError, fmt.Sprintf("读取输入错误: %v", err))
-		return
+		return fmt.Errorf("读取输入错误: %w", err)
 	}
 
 	// for gzip, closing chunkedWriter emits the final chunk
 	if compress != "gzip" {
 		if fErr := chunkedWriter.Flush(); fErr != nil {
 			logError("final chunkedWriter flush failed: %v", fErr)
+			return fmt.Errorf("final chunkedWriter flush failed: %w", fErr)
 		}
 	}
 
+	return nil // processing completed successfully
 }
 
-func ProcessAndWriteChunkedBody(input io.Reader, compress string, host string, cfg *config.Config, c *app.RequestContext) error {
-	var reader *bufio.Reader
-
-	if compress == "gzip" {
-		// decompress gzip
-		gzipReader, err := gzip.NewReader(input)
-		if err != nil {
-			return fmt.Errorf("gzip解压错误: %v", err)
-		}
-		defer gzipReader.Close()
-		reader = bufio.NewReader(gzipReader)
-	} else {
-		reader = bufio.NewReader(input)
-	}
-
-	// create a buffer to hold the output
-	var outputBuffer io.Writer
-	var gzipWriter *gzip.Writer
-	var buf bytes.Buffer
-
-	if compress == "gzip" {
-		// create a buffer
-		outputBuffer = &buf
-		gzipWriter = gzip.NewWriter(outputBuffer)
-		defer func() {
-			if gzipWriter != nil {
-				if closeErr := gzipWriter.Close(); closeErr != nil {
-					logError("gzipWriter close failed %v", closeErr)
-				}
-			}
-		}()
-	} else {
-		outputBuffer = &buf
-	}
-
-	writer := bufio.NewWriter(outputBuffer)
-	defer func() {
-		if flushErr := writer.Flush(); flushErr != nil {
-			logError("writer flush failed %v", flushErr)
-		}
-	}()
-
-	// match http and https links with a regular expression
-	urlPattern := regexp.MustCompile(`https?://[^\s'"]+`)
-	for {
-		line, err := reader.ReadString('\n')
-		if err != nil {
-			if err == io.EOF {
-				break // end of input
-			}
-			return fmt.Errorf("读取行错误: %v", err) // propagate the error
-		}
-
-		// replace every matching URL
-		modifiedLine := urlPattern.ReplaceAllStringFunc(line, func(originalURL string) string {
-			return modifyURL(originalURL, host, cfg)
-		})
-
-		_, werr := writer.WriteString(modifiedLine)
-		if werr != nil {
-			return fmt.Errorf("写入文件错误: %v", werr) // propagate the error
-		}
-	}
-
-	// flush once more before returning
-	if fErr := writer.Flush(); fErr != nil {
-		return fErr
-	}
-
-	if compress == "gzip" {
-		if err := gzipWriter.Close(); err != nil {
-			return fmt.Errorf("gzipWriter close failed: %v", err)
-		}
-	}
-
-	// write the processed content to the response in chunked mode
-	c.Response.HijackWriter(hresp.NewChunkedBodyWriter(&c.Response, c.GetWriter()))
-
-	bufWrapper := bytebufferpool.Get()
-	bbuf := bufWrapper.B
-	size := 32768 // 32KB
-	if cap(bbuf) < size {
-		bbuf = make([]byte, size)
-	} else {
-		bbuf = bbuf[:size]
-	}
-	defer bytebufferpool.Put(bufWrapper)
-
-	// write the buffered content to the response
-	for {
-		n, err := buf.Read(bbuf)
-		if err != nil {
-			if err != io.EOF {
-				fmt.Println("读取错误:", err)
-				c.String(http.StatusInternalServerError, "读取错误")
-				return err
-			}
-			break // reached the end of the buffer
-		}
-
-		_, err = c.Write(bbuf[:n]) // write a chunk
-		if err != nil {
-			fmt.Println("写入 chunk 错误:", err)
-			return err
-		}
-
-		c.Flush() // flush the chunk to the client
-	}
-
-	return nil
-}
+// extractParts extracts the required parts from the given URL
+func extractParts(rawURL string) (string, string, string, url.Values, error) {
+	// parse the URL
+	parsedURL, err := url.Parse(rawURL)
+	if err != nil {
+		return "", "", "", nil, err
+	}
+
+	// take the path portion and split it
+	pathParts := strings.Split(parsedURL.Path, "/")
+
+	// extract the required parts
+	if len(pathParts) < 3 {
+		return "", "", "", nil, fmt.Errorf("URL path is too short")
+	}
+
+	// extract e.g. /WJQSERVER-STUDIO and /go-utils.git
+	repoOwner := "/" + pathParts[1]
+	repoName := "/" + pathParts[2]
+
+	// remaining path
+	remainingPath := strings.Join(pathParts[3:], "/")
+	if remainingPath != "" {
+		remainingPath = "/" + remainingPath
+	}
+
+	// query parameters
+	queryParams := parsedURL.Query()
+
+	return repoOwner, repoName, remainingPath, queryParams, nil
+}
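Note: extractParts, added here after being removed from the Git proxy file earlier in this commit, splits a proxied GitHub URL into owner, repo, trailing path, and query values. A small standalone usage sketch follows; the sample URL is made up for illustration (it reuses the WJQSERVER-STUDIO/go-utils.git names from the function's own comment), and the function body is reproduced so the example runs on its own.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// same logic as extractParts above, copied so this example is self-contained.
func extractParts(rawURL string) (string, string, string, url.Values, error) {
	parsedURL, err := url.Parse(rawURL)
	if err != nil {
		return "", "", "", nil, err
	}
	pathParts := strings.Split(parsedURL.Path, "/")
	if len(pathParts) < 3 {
		return "", "", "", nil, fmt.Errorf("URL path is too short")
	}
	repoOwner := "/" + pathParts[1]
	repoName := "/" + pathParts[2]
	remainingPath := strings.Join(pathParts[3:], "/")
	if remainingPath != "" {
		remainingPath = "/" + remainingPath
	}
	return repoOwner, repoName, remainingPath, parsedURL.Query(), nil
}

func main() {
	owner, repo, rest, q, err := extractParts("https://github.com/WJQSERVER-STUDIO/go-utils.git/info/refs?service=git-upload-pack")
	fmt.Println(owner, repo, rest, q, err)
	// Output: /WJQSERVER-STUDIO /go-utils.git /info/refs map[service:[git-upload-pack]] <nil>
}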