Mirror of https://github.com/tobychui/zoraxy.git (synced 2025-10-30 05:24:06 +01:00)

Compare commits: 22 commits

| SHA1 |
|---|
| b3aa97fdff |
| 5dc89b812d |
| 7829e5a321 |
| 42d6b61770 |
| 5f420b4636 |
| 3d79a19cd0 |
| 9d0a2a94f7 |
| f9ef648664 |
| 8f95b622ff |
| fa941e26a7 |
| 72e5d3ce3f |
| 7efc7da9ab |
| 944a8651ea |
| f3143e52b3 |
| 5c8e4a7df1 |
| 0f295185f1 |
| 322e5239a8 |
| 71bf844dc1 |
| 298444a53f |
| 41cf0cc2c7 |
| 863113a1b8 |
| aa9036fa8b |
@@ -1,3 +1,10 @@

# v3.2.8 16 Oct 2025

+ Fixed wildcard certificate bug [#845](https://github.com/tobychui/zoraxy/issues/845) by [zen8841](https://github.com/zen8841)
+ Move function:NormalizeDomain to netutils module by [zen8841](https://github.com/zen8841)
+ Add support for Proxy Protocol V1 and V2 in streamproxy configuration by [jemmy1794](https://github.com/jemmy1794)
+ Added user selectable versions for TLS

# v3.2.7 09 Oct 2025

+ Update Sidebar CSS by [Saeraphinx](https://github.com/Saeraphinx)
@@ -386,7 +386,9 @@ func initAPIs(targetMux *http.ServeMux) {
authRouter.HandleFunc("/api/log/read", LogViewer.HandleReadLog)
authRouter.HandleFunc("/api/log/summary", LogViewer.HandleReadLogSummary)
authRouter.HandleFunc("/api/log/errors", LogViewer.HandleLogErrorSummary)
authRouter.HandleFunc("/api/log/rotate/debug.trigger", SystemWideLogger.HandleDebugTriggerLogRotation)
authRouter.HandleFunc("/api/log/rotate/trigger", SystemWideLogger.HandleDebugTriggerLogRotation)
authRouter.HandleFunc("/api/logger/config", handleLoggerConfig)

//Debug
authRouter.HandleFunc("/api/info/pprof", pprof.Index)
}
@@ -15,6 +15,7 @@ import (

"imuslab.com/zoraxy/mod/dynamicproxy"
"imuslab.com/zoraxy/mod/dynamicproxy/loadbalance"
"imuslab.com/zoraxy/mod/info/logger"
"imuslab.com/zoraxy/mod/tlscert"
"imuslab.com/zoraxy/mod/utils"
)

@@ -366,3 +367,13 @@ func ImportConfigFromZip(w http.ResponseWriter, r *http.Request) {
}

}

func handleLoggerConfig(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodGet {
logger.HandleGetLogConfig(CONF_LOG_CONFIG)(w, r)
} else if r.Method == http.MethodPost {
logger.HandleUpdateLogConfig(CONF_LOG_CONFIG, SystemWideLogger)(w, r)
} else {
utils.SendErrorResponse(w, "Method not allowed")
}
}
@@ -44,7 +44,7 @@ import (
const (
/* Build Constants */
SYSTEM_NAME = "Zoraxy"
SYSTEM_VERSION = "3.2.8"
SYSTEM_VERSION = "3.2.9"
DEVELOPMENT_BUILD = false

/* System Constants */
@@ -76,6 +76,7 @@ const (
CONF_PATH_RULE = CONF_FOLDER + "/rules/pathrules"
CONF_PLUGIN_GROUPS = CONF_FOLDER + "/plugin_groups.json"
CONF_GEODB_PATH = CONF_FOLDER + "/geodb"
CONF_LOG_CONFIG = CONF_FOLDER + "/log_conf.json"
)

/* System Startup Flags */
@@ -95,9 +96,9 @@ var (
enableAutoUpdate = flag.Bool("cfgupgrade", true, "Enable auto config upgrade if breaking change is detected")

/* Logging Configuration Flags */
enableLog = flag.Bool("enablelog", true, "Enable system wide logging, set to false for writing log to STDOUT only")
enableLogCompression = flag.Bool("enablelogcompress", true, "Enable log compression for rotated log files")
logRotate = flag.String("logrotate", "0", "Enable log rotation and set the maximum log file size in KB, also support K, M, G suffix (e.g. 200M), set to 0 to disable")
enableLog = flag.Bool("enablelog", true, "Enable system wide logging, set to false for writing log to STDOUT only")
//enableLogCompression = flag.Bool("enablelogcompress", true, "Enable log compression for rotated log files")
//logRotate = flag.String("logrotate", "0", "Enable log rotation and set the maximum log file size in KB, also support K, M, G suffix (e.g. 200M), set to 0 to disable")

/* Default Configuration Flags */
defaultInboundPort = flag.Int("default_inbound_port", 443, "Default web server listening port")
@@ -14,6 +14,7 @@ require (
github.com/gorilla/sessions v1.2.2
github.com/gorilla/websocket v1.5.1
github.com/grandcat/zeroconf v1.0.0
github.com/jellydator/ttlcache/v3 v3.4.0
github.com/likexian/whois v1.15.1
github.com/microcosm-cc/bluemonday v1.0.26
github.com/monperrus/crawler-user-agents v1.1.0
@@ -445,6 +445,8 @@ github.com/infobloxopen/infoblox-go-client/v2 v2.10.0/go.mod h1:NeNJpz09efw/edzq
github.com/jarcoal/httpmock v1.0.8/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik=
github.com/jarcoal/httpmock v1.4.0 h1:BvhqnH0JAYbNudL2GMJKgOHe2CtKlzJ/5rWKyp+hc2k=
github.com/jarcoal/httpmock v1.4.0/go.mod h1:ftW1xULwo+j0R0JJkJIIi7UKigZUXCLLanykgjwBXL0=
github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY=
github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
@@ -508,8 +508,8 @@ func (a *ACMEHandler) HandleRenewCertificate(w http.ResponseWriter, r *http.Requ
dns = true
}

// Default propagation timeout is 300 seconds
propagationTimeout := 300
// Default propagation timeout is 600 seconds (10 minutes)
propagationTimeout := 600
if dns {
ppgTimeout, err := utils.PostPara(r, "ppgTimeout")
if err == nil {
@@ -7,23 +7,33 @@ import (
"net/http"
"net/url"
"strings"
"time"

"github.com/jellydator/ttlcache/v3"
"golang.org/x/oauth2"
"imuslab.com/zoraxy/mod/database"
"imuslab.com/zoraxy/mod/info/logger"
"imuslab.com/zoraxy/mod/utils"
)

const (
// DefaultOAuth2ConfigCacheTime defines the default cache duration for OAuth2 configuration
DefaultOAuth2ConfigCacheTime = 60 * time.Second
)

type OAuth2RouterOptions struct {
OAuth2ServerURL string //The URL of the OAuth 2.0 server server
OAuth2TokenURL string //The URL of the OAuth 2.0 token server
OAuth2ClientId string //The client id for OAuth 2.0 Application
OAuth2ClientSecret string //The client secret for OAuth 2.0 Application
OAuth2WellKnownUrl string //The well-known url for OAuth 2.0 server
OAuth2UserInfoUrl string //The URL of the OAuth 2.0 user info endpoint
OAuth2Scopes string //The scopes for OAuth 2.0 Application
Logger *logger.Logger
Database *database.Database
OAuth2ServerURL string //The URL of the OAuth 2.0 server server
OAuth2TokenURL string //The URL of the OAuth 2.0 token server
OAuth2ClientId string //The client id for OAuth 2.0 Application
OAuth2ClientSecret string //The client secret for OAuth 2.0 Application
OAuth2WellKnownUrl string //The well-known url for OAuth 2.0 server
OAuth2UserInfoUrl string //The URL of the OAuth 2.0 user info endpoint
OAuth2Scopes string //The scopes for OAuth 2.0 Application
OAuth2CodeChallengeMethod string //The authorization code challenge method
OAuth2ConfigurationCacheTime *time.Duration
Logger *logger.Logger
Database *database.Database
OAuth2ConfigCache *ttlcache.Cache[string, *oauth2.Config]
}

type OIDCDiscoveryDocument struct {
@@ -57,11 +67,26 @@ func NewOAuth2Router(options *OAuth2RouterOptions) *OAuth2Router {
options.Database.Read("oauth2", "oauth2ClientId", &options.OAuth2ClientId)
options.Database.Read("oauth2", "oauth2ClientSecret", &options.OAuth2ClientSecret)
options.Database.Read("oauth2", "oauth2UserInfoUrl", &options.OAuth2UserInfoUrl)
options.Database.Read("oauth2", "oauth2CodeChallengeMethod", &options.OAuth2CodeChallengeMethod)
options.Database.Read("oauth2", "oauth2Scopes", &options.OAuth2Scopes)
options.Database.Read("oauth2", "oauth2ConfigurationCacheTime", &options.OAuth2ConfigurationCacheTime)

return &OAuth2Router{
ar := &OAuth2Router{
options: options,
}

if options.OAuth2ConfigurationCacheTime == nil ||
options.OAuth2ConfigurationCacheTime.Seconds() == 0 {
cacheTime := DefaultOAuth2ConfigCacheTime
options.OAuth2ConfigurationCacheTime = &cacheTime
}

options.OAuth2ConfigCache = ttlcache.New[string, *oauth2.Config](
ttlcache.WithTTL[string, *oauth2.Config](*options.OAuth2ConfigurationCacheTime),
)
go options.OAuth2ConfigCache.Start()

return ar
}

// HandleSetOAuth2Settings is the internal handler for setting the OAuth URL and HTTPS
@@ -81,13 +106,15 @@ func (ar *OAuth2Router) HandleSetOAuth2Settings(w http.ResponseWriter, r *http.R
func (ar *OAuth2Router) handleSetOAuthSettingsGET(w http.ResponseWriter, r *http.Request) {
//Return the current settings
js, _ := json.Marshal(map[string]interface{}{
"oauth2WellKnownUrl": ar.options.OAuth2WellKnownUrl,
"oauth2ServerUrl": ar.options.OAuth2ServerURL,
"oauth2TokenUrl": ar.options.OAuth2TokenURL,
"oauth2UserInfoUrl": ar.options.OAuth2UserInfoUrl,
"oauth2Scopes": ar.options.OAuth2Scopes,
"oauth2ClientSecret": ar.options.OAuth2ClientSecret,
"oauth2ClientId": ar.options.OAuth2ClientId,
"oauth2WellKnownUrl": ar.options.OAuth2WellKnownUrl,
"oauth2ServerUrl": ar.options.OAuth2ServerURL,
"oauth2TokenUrl": ar.options.OAuth2TokenURL,
"oauth2UserInfoUrl": ar.options.OAuth2UserInfoUrl,
"oauth2Scopes": ar.options.OAuth2Scopes,
"oauth2ClientSecret": ar.options.OAuth2ClientSecret,
"oauth2ClientId": ar.options.OAuth2ClientId,
"oauth2CodeChallengeMethod": ar.options.OAuth2CodeChallengeMethod,
"oauth2ConfigurationCacheTime": ar.options.OAuth2ConfigurationCacheTime.String(),
})

utils.SendJSONResponse(w, string(js))
@@ -95,7 +122,8 @@ func (ar *OAuth2Router) handleSetOAuthSettingsGET(w http.ResponseWriter, r *http

func (ar *OAuth2Router) handleSetOAuthSettingsPOST(w http.ResponseWriter, r *http.Request) {
//Update the settings
var oauth2ServerUrl, oauth2TokenURL, oauth2Scopes, oauth2UserInfoUrl string
var oauth2ServerUrl, oauth2TokenURL, oauth2Scopes, oauth2UserInfoUrl, oauth2CodeChallengeMethod string
var oauth2ConfigurationCacheTime *time.Duration

oauth2ClientId, err := utils.PostPara(r, "oauth2ClientId")
if err != nil {
@@ -109,6 +137,18 @@
return
}

oauth2CodeChallengeMethod, err = utils.PostPara(r, "oauth2CodeChallengeMethod")
if err != nil {
utils.SendErrorResponse(w, "oauth2CodeChallengeMethod not found")
return
}

oauth2ConfigurationCacheTime, err = utils.PostDuration(r, "oauth2ConfigurationCacheTime")
if err != nil {
utils.SendErrorResponse(w, "oauth2ConfigurationCacheTime not found")
return
}

oauth2WellKnownUrl, err := utils.PostPara(r, "oauth2WellKnownUrl")
if err != nil {
oauth2ServerUrl, err = utils.PostPara(r, "oauth2ServerUrl")
@@ -146,6 +186,8 @@
ar.options.OAuth2ClientId = oauth2ClientId
ar.options.OAuth2ClientSecret = oauth2ClientSecret
ar.options.OAuth2Scopes = oauth2Scopes
ar.options.OAuth2CodeChallengeMethod = oauth2CodeChallengeMethod
ar.options.OAuth2ConfigurationCacheTime = oauth2ConfigurationCacheTime

//Write changes to database
ar.options.Database.Write("oauth2", "oauth2WellKnownUrl", oauth2WellKnownUrl)
@@ -155,6 +197,11 @@
ar.options.Database.Write("oauth2", "oauth2ClientId", oauth2ClientId)
ar.options.Database.Write("oauth2", "oauth2ClientSecret", oauth2ClientSecret)
ar.options.Database.Write("oauth2", "oauth2Scopes", oauth2Scopes)
ar.options.Database.Write("oauth2", "oauth2CodeChallengeMethod", oauth2CodeChallengeMethod)
ar.options.Database.Write("oauth2", "oauth2ConfigurationCacheTime", oauth2ConfigurationCacheTime)

// Flush caches
ar.options.OAuth2ConfigCache.DeleteAll()

utils.SendOK(w)
}
@@ -167,6 +214,7 @@
ar.options.OAuth2ClientId = ""
ar.options.OAuth2ClientSecret = ""
ar.options.OAuth2Scopes = ""
ar.options.OAuth2CodeChallengeMethod = ""

ar.options.Database.Delete("oauth2", "oauth2WellKnownUrl")
ar.options.Database.Delete("oauth2", "oauth2ServerUrl")
@@ -175,6 +223,8 @@
ar.options.Database.Delete("oauth2", "oauth2ClientId")
ar.options.Database.Delete("oauth2", "oauth2ClientSecret")
ar.options.Database.Delete("oauth2", "oauth2Scopes")
ar.options.Database.Delete("oauth2", "oauth2CodeChallengeMethod")
ar.options.Database.Delete("oauth2", "oauth2ConfigurationCacheTime")

utils.SendOK(w)
}
@@ -189,12 +239,10 @@
return nil, err
} else {
defer resp.Body.Close()

oidcDiscoveryDocument := OIDCDiscoveryDocument{}
if err := json.NewDecoder(resp.Body).Decode(&oidcDiscoveryDocument); err != nil {
return nil, err
}

if len(config.Scopes) == 0 {
config.Scopes = oidcDiscoveryDocument.ScopesSupported
}
@@ -241,14 +289,24 @@
func (ar *OAuth2Router) HandleOAuth2Auth(w http.ResponseWriter, r *http.Request) error {
const callbackPrefix = "/internal/oauth2"
const tokenCookie = "z-token"
const verifierCookie = "z-verifier"
scheme := "http"
if r.TLS != nil {
scheme = "https"
}

reqUrl := scheme + "://" + r.Host + r.RequestURI
oauthConfig, err := ar.newOAuth2Conf(scheme + "://" + r.Host + callbackPrefix)
if err != nil {
ar.options.Logger.PrintAndLog("OAuth2Router", "Failed to fetch OIDC configuration:", err)
oauthConfigCache, _ := ar.options.OAuth2ConfigCache.GetOrSetFunc(r.Host, func() *oauth2.Config {
oauthConfig, err := ar.newOAuth2Conf(scheme + "://" + r.Host + callbackPrefix)
if err != nil {
ar.options.Logger.PrintAndLog("OAuth2Router", "Failed to fetch OIDC configuration:", err)
return nil
}
return oauthConfig
})

oauthConfig := oauthConfigCache.Value()
if oauthConfig == nil {
w.WriteHeader(500)
return errors.New("failed to fetch OIDC configuration")
}
@@ -263,26 +321,48 @@
state := r.URL.Query().Get("state")
if r.Method == http.MethodGet && strings.HasPrefix(r.RequestURI, callbackPrefix) && code != "" && state != "" {
ctx := context.Background()
token, err := oauthConfig.Exchange(ctx, code)
var authCodeOptions []oauth2.AuthCodeOption
if ar.options.OAuth2CodeChallengeMethod == "PKCE" || ar.options.OAuth2CodeChallengeMethod == "PKCE_S256" {
verifierCookie, err := r.Cookie(verifierCookie)
if err != nil || verifierCookie.Value == "" {
ar.options.Logger.PrintAndLog("OAuth2Router", "Read OAuth2 verifier cookie failed", err)
w.WriteHeader(401)
return errors.New("unauthorized")
}
authCodeOptions = append(authCodeOptions, oauth2.VerifierOption(verifierCookie.Value))
}
token, err := oauthConfig.Exchange(ctx, code, authCodeOptions...)
if err != nil {
ar.options.Logger.PrintAndLog("OAuth2", "Token exchange failed", err)
w.WriteHeader(401)
return errors.New("unauthorized")
}

if !token.Valid() {
ar.options.Logger.PrintAndLog("OAuth2", "Invalid token", err)
w.WriteHeader(401)
return errors.New("unauthorized")
}

cookie := http.Cookie{Name: tokenCookie, Value: token.AccessToken, Path: "/"}
cookieExpiry := token.Expiry
if cookieExpiry.IsZero() || cookieExpiry.Before(time.Now()) {
cookieExpiry = time.Now().Add(time.Hour)
}
cookie := http.Cookie{Name: tokenCookie, Value: token.AccessToken, Path: "/", Expires: cookieExpiry}
if scheme == "https" {
cookie.Secure = true
cookie.SameSite = http.SameSiteLaxMode
}
w.Header().Add("Set-Cookie", cookie.String())

if ar.options.OAuth2CodeChallengeMethod == "PKCE" || ar.options.OAuth2CodeChallengeMethod == "PKCE_S256" {
cookie := http.Cookie{Name: verifierCookie, Value: "", Path: "/", Expires: time.Now().Add(-time.Hour * 1)}
if scheme == "https" {
cookie.Secure = true
cookie.SameSite = http.SameSiteLaxMode
}
w.Header().Add("Set-Cookie", cookie.String())
}

//Fix for #695
location := strings.TrimPrefix(state, "/internal/")
//Check if the location starts with http:// or https://. if yes, this is full URL
@@ -321,7 +401,25 @@
}
if unauthorized {
state := url.QueryEscape(reqUrl)
url := oauthConfig.AuthCodeURL(state, oauth2.AccessTypeOffline)
var url string
if ar.options.OAuth2CodeChallengeMethod == "PKCE" || ar.options.OAuth2CodeChallengeMethod == "PKCE_S256" {
cookie := http.Cookie{Name: verifierCookie, Value: oauth2.GenerateVerifier(), Path: "/", Expires: time.Now().Add(time.Hour * 1)}
if scheme == "https" {
cookie.Secure = true
cookie.SameSite = http.SameSiteLaxMode
}

w.Header().Add("Set-Cookie", cookie.String())

if ar.options.OAuth2CodeChallengeMethod == "PKCE" {
url = oauthConfig.AuthCodeURL(state, oauth2.AccessTypeOffline, oauth2.SetAuthURLParam("code_challenge", cookie.Value))
} else {
url = oauthConfig.AuthCodeURL(state, oauth2.AccessTypeOffline, oauth2.S256ChallengeOption(cookie.Value))
}
} else {
url = oauthConfig.AuthCodeURL(state, oauth2.AccessTypeOffline)
}

http.Redirect(w, r, url, http.StatusFound)

return errors.New("unauthorized")
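The two code-challenge branches above differ only in how the challenge is derived from the verifier stored in the `z-verifier` cookie: the "PKCE" setting sends the verifier itself as `code_challenge` (the plain method), while "PKCE_S256" uses `oauth2.S256ChallengeOption`. As a rough standalone illustration only (not repository code), the sketch below shows what each method would send for a verifier generated with golang.org/x/oauth2; to my understanding, the S256 value is the padding-free base64url encoding of the SHA-256 hash of the verifier, which is what `S256ChallengeOption` derives internally.

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	// A verifier is a high-entropy random string; golang.org/x/oauth2 can generate one.
	verifier := oauth2.GenerateVerifier()

	// "plain" method: the challenge sent to the authorization server is the verifier itself.
	plainChallenge := verifier

	// "S256" method: base64url(SHA-256(verifier)) without padding.
	sum := sha256.Sum256([]byte(verifier))
	s256Challenge := base64.RawURLEncoding.EncodeToString(sum[:])

	fmt.Println("verifier:        ", verifier)
	fmt.Println("plain challenge: ", plainChallenge)
	fmt.Println("S256 challenge:  ", s256Challenge)
}
```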
@@ -48,7 +48,7 @@ func (h *ProxyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
//Check if this is a redirection url
if h.Parent.Option.RedirectRuleTable.IsRedirectable(r) {
statusCode := h.Parent.Option.RedirectRuleTable.HandleRedirect(w, r)
h.Parent.logRequest(r, statusCode != 500, statusCode, "redirect", r.Host, "")
h.Parent.logRequest(r, statusCode != 500, statusCode, "redirect", r.Host, "", nil)
return
}

@@ -70,7 +70,7 @@
//Use default rule
ruleID = "default"
}
if h.handleAccessRouting(ruleID, w, r) {
if h.handleAccessRouting(ruleID, w, r, sep) {
//Request handled by subroute
return
}
@@ -79,7 +79,9 @@
if sep.RequireRateLimit {
err := h.handleRateLimitRouting(w, r, sep)
if err != nil {
h.Parent.Option.Logger.LogHTTPRequest(r, "host", 307, r.Host, "")
if !sep.DisableLogging {
h.Parent.Option.Logger.LogHTTPRequest(r, "host", 307, r.Host, "")
}
return
}
}
@@ -109,7 +111,9 @@
if potentialProxtEndpoint != nil && !potentialProxtEndpoint.Disabled {
//Missing tailing slash. Redirect to target proxy endpoint
http.Redirect(w, r, r.RequestURI+"/", http.StatusTemporaryRedirect)
h.Parent.Option.Logger.LogHTTPRequest(r, "redirect", 307, r.Host, "")
if !sep.DisableLogging {
h.Parent.Option.Logger.LogHTTPRequest(r, "redirect", 307, r.Host, "")
}
return
}
}
@@ -124,7 +128,7 @@
*/

//Root access control based on default rule
blocked := h.handleAccessRouting("default", w, r)
blocked := h.handleAccessRouting("default", w, r, h.Parent.Root)
if blocked {
return
}
@@ -210,19 +214,19 @@ func (h *ProxyHandler) handleRootRouting(w http.ResponseWriter, r *http.Request)
}
hostname := parsedURL.Hostname()
if hostname == domainOnly {
h.Parent.logRequest(r, false, 500, "root-redirect", domainOnly, "")
h.Parent.logRequest(r, false, 500, "root-redirect", domainOnly, "", h.Parent.Root)
http.Error(w, "Loopback redirects due to invalid settings", 500)
return
}

h.Parent.logRequest(r, false, 307, "root-redirect", domainOnly, "")
h.Parent.logRequest(r, false, 307, "root-redirect", domainOnly, "", h.Parent.Root)
http.Redirect(w, r, redirectTarget, http.StatusTemporaryRedirect)
case DefaultSite_NotFoundPage:
//Serve the not found page, use template if exists
h.serve404PageWithTemplate(w, r)
case DefaultSite_NoResponse:
//No response. Just close the connection
h.Parent.logRequest(r, false, 444, "root-no_resp", domainOnly, "")
h.Parent.logRequest(r, false, 444, "root-no_resp", domainOnly, "", h.Parent.Root)
hijacker, ok := w.(http.Hijacker)
if !ok {
w.WriteHeader(http.StatusNoContent)
@@ -236,11 +240,11 @@
conn.Close()
case DefaultSite_TeaPot:
//I'm a teapot
h.Parent.logRequest(r, false, 418, "root-teapot", domainOnly, "")
h.Parent.logRequest(r, false, 418, "root-teapot", domainOnly, "", h.Parent.Root)
http.Error(w, "I'm a teapot", http.StatusTeapot)
default:
//Unknown routing option. Send empty response
h.Parent.logRequest(r, false, 544, "root-unknown", domainOnly, "")
h.Parent.logRequest(r, false, 544, "root-unknown", domainOnly, "", h.Parent.Root)
http.Error(w, "544 - No Route Defined", 544)
}
}
@@ -13,7 +13,7 @@ import (

// Handle access check (blacklist / whitelist), return true if request is handled (aka blocked)
// if the return value is false, you can continue process the response writer
func (h *ProxyHandler) handleAccessRouting(ruleID string, w http.ResponseWriter, r *http.Request) bool {
func (h *ProxyHandler) handleAccessRouting(ruleID string, w http.ResponseWriter, r *http.Request, sep *ProxyEndpoint) bool {
accessRule, err := h.Parent.Option.AccessController.GetAccessRuleByID(ruleID)
if err != nil {
//Unable to load access rule. Target rule not found?
@@ -25,7 +25,7 @@

isBlocked, blockedReason := accessRequestBlocked(accessRule, h.Parent.Option.WebDirectory, w, r)
if isBlocked {
h.Parent.logRequest(r, false, 403, blockedReason, r.Host, "")
h.Parent.logRequest(r, false, 403, blockedReason, r.Host, "", sep)
}
return isBlocked
}
@@ -156,7 +156,7 @@ func (router *Router) StartProxyService() error {
if err != nil {
http.ServeFile(w, r, "./web/hosterror.html")
router.Option.Logger.PrintAndLog("dprouter", "failed to get upstream for hostname", err)
router.logRequest(r, false, 404, "vdir-http", r.Host, "")
router.logRequest(r, false, 404, "vdir-http", r.Host, "", sep)
}

endpointProxyRewriteRules := GetDefaultHeaderRewriteRules()
@@ -28,11 +28,14 @@ func (h2c *H2CRoundTripper) RoundTrip(req *http.Request) (*http.Response, error)
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()

req, err := http.NewRequestWithContext(ctx, req.Method, req.RequestURI, nil)
req, err := http.NewRequestWithContext(ctx, req.Method, req.URL.String(), req.Body)
if err != nil {
return nil, err
}

// Copy headers
req.Header = req.Header.Clone()

tr := &http2.Transport{
AllowHTTP: true,
DialTLSContext: func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error) {
@@ -43,3 +46,20 @@

return tr.RoundTrip(req)
}

func (h2c *H2CRoundTripper) CheckServerSupportsH2C(serverURL string) bool {
req, err := http.NewRequest("GET", serverURL, nil)
if err != nil {
return false
}

tr := &http2.Transport{
AllowHTTP: true,
DialTLSContext: func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error) {
var d net.Dialer
return d.DialContext(ctx, network, addr)
},
}
_, err = tr.RoundTrip(req)
return err == nil
}
src/mod/dynamicproxy/modh2c/modh2c_test.go (new file, 83 lines)
@@ -0,0 +1,83 @@
package modh2c

import (
"net/http"
"net/http/httptest"
"testing"

"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
)

func TestH2CRoundTripper_RoundTrip(t *testing.T) {
// Create a test server that supports h2c
mux := http.NewServeMux()
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
if r.Proto != "HTTP/2.0" {
t.Errorf("Expected HTTP/2.0, got %s", r.Proto)
}
w.WriteHeader(http.StatusOK)
w.Write([]byte("Hello, h2c!"))
})

server := httptest.NewServer(h2c.NewHandler(mux, &http2.Server{}))
defer server.Close()

// Create the round tripper
rt := NewH2CRoundTripper()

// Create a request
req, err := http.NewRequest("GET", server.URL, nil)
if err != nil {
t.Fatalf("Failed to create request: %v", err)
}

// Perform the round trip
resp, err := rt.RoundTrip(req)
if err != nil {
t.Fatalf("RoundTrip failed: %v", err)
}
defer resp.Body.Close()

if resp.StatusCode != http.StatusOK {
t.Errorf("Expected status 200, got %d", resp.StatusCode)
}

// Check the response body
body := make([]byte, 1024)
n, err := resp.Body.Read(body)
if err != nil && err.Error() != "EOF" {
t.Fatalf("Failed to read body: %v", err)
}
if string(body[:n]) != "Hello, h2c!" {
t.Errorf("Expected 'Hello, h2c!', got '%s'", string(body[:n]))
}
}

func TestH2CRoundTripper_CheckServerSupportsH2C(t *testing.T) {
// Test with h2c server
mux := http.NewServeMux()
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
})

server := httptest.NewServer(h2c.NewHandler(mux, &http2.Server{}))
defer server.Close()

rt := NewH2CRoundTripper()
supports := rt.CheckServerSupportsH2C(server.URL)
if !supports {
t.Error("Expected server to support h2c")
}

// Test with non-h2c server (regular HTTP/1.1)
server2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}))
defer server2.Close()

supports2 := rt.CheckServerSupportsH2C(server2.URL)
if supports2 {
t.Error("Expected server to not support h2c")
}
}
@@ -51,6 +51,11 @@ func (router *Router) GetProxyEndpointFromHostname(hostname string) *ProxyEndpoi
matchProxyEndpoints := []*ProxyEndpoint{}
router.ProxyEndpoints.Range(func(k, v interface{}) bool {
ep := v.(*ProxyEndpoint)
if ep.Disabled {
//Skip disabled endpoint
return true
}

match, err := filepath.Match(ep.RootOrMatchingDomain, hostname)
if err != nil {
//Bad pattern. Skip this rule
@@ -83,12 +88,24 @@
})

if len(matchProxyEndpoints) == 1 {
//Only 1 match
return matchProxyEndpoints[0]
} else if len(matchProxyEndpoints) > 1 {
//More than one match. Get the best match one
sort.Slice(matchProxyEndpoints, func(i, j int) bool {
return matchProxyEndpoints[i].RootOrMatchingDomain < matchProxyEndpoints[j].RootOrMatchingDomain
// More than one match, pick one that is:
// 1. longer RootOrMatchingDomain (more specific)
// 2. fewer wildcard characters (* and ?) (more specific)
// 3. fallback to lexicographic order
sort.SliceStable(matchProxyEndpoints, func(i, j int) bool {
a := matchProxyEndpoints[i].RootOrMatchingDomain
b := matchProxyEndpoints[j].RootOrMatchingDomain
if len(a) != len(b) {
return len(a) > len(b)
}
aw := strings.Count(a, "*") + strings.Count(a, "?")
bw := strings.Count(b, "*") + strings.Count(b, "?")
if aw != bw {
return aw < bw
}
return a < b
})
return matchProxyEndpoints[0]
}
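To see what the new comparator does in practice, here is a small self-contained sketch (illustration only, using hypothetical matching patterns rather than repository data) that applies the same three ordering rules and shows the most specific rule winning the tie-break:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Hypothetical matching domains that could all match "a.example.com".
	patterns := []string{"*", "*.example.com", "a.example.com"}

	sort.SliceStable(patterns, func(i, j int) bool {
		a, b := patterns[i], patterns[j]
		if len(a) != len(b) {
			return len(a) > len(b) // longer pattern first (more specific)
		}
		aw := strings.Count(a, "*") + strings.Count(a, "?")
		bw := strings.Count(b, "*") + strings.Count(b, "?")
		if aw != bw {
			return aw < bw // fewer wildcards first
		}
		return a < b // lexicographic fallback
	})

	fmt.Println(patterns) // [a.example.com *.example.com *]
}
```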
@@ -110,13 +127,13 @@ func (router *Router) rewriteURL(rooturl string, requestURL string) string {

// upstreamHostSwap check if this loopback to one of the proxy rule in the system. If yes, do a shortcut target swap
// this prevents unnecessary external DNS lookup and connection, return true if swapped and request is already handled
// by the loopback handler. Only continue if return is false
func (h *ProxyHandler) upstreamHostSwap(w http.ResponseWriter, r *http.Request, selectedUpstream *loadbalance.Upstream) bool {
func (h *ProxyHandler) upstreamHostSwap(w http.ResponseWriter, r *http.Request, selectedUpstream *loadbalance.Upstream, currentTarget *ProxyEndpoint) bool {
upstreamHostname := selectedUpstream.OriginIpOrDomain
if strings.Contains(upstreamHostname, ":") {
upstreamHostname = strings.Split(upstreamHostname, ":")[0]
}
loopbackProxyEndpoint := h.Parent.GetProxyEndpointFromHostname(upstreamHostname)
if loopbackProxyEndpoint != nil {
if loopbackProxyEndpoint != nil && loopbackProxyEndpoint != currentTarget {
//This is a loopback request. Swap the target to the loopback target
//h.Parent.Option.Logger.PrintAndLog("proxy", "Detected a loopback request to self. Swap the target to "+loopbackProxyEndpoint.RootOrMatchingDomain, nil)
if loopbackProxyEndpoint.IsEnabled() {
@@ -124,7 +141,7 @@
} else {
//Endpoint disabled, return 503
http.ServeFile(w, r, "./web/rperror.html")
h.Parent.logRequest(r, false, 521, "host-http", r.Host, upstreamHostname)
h.Parent.logRequest(r, false, 521, "host-http", r.Host, upstreamHostname, currentTarget)
}
return true
}
@@ -142,12 +159,12 @@ func (h *ProxyHandler) hostRequest(w http.ResponseWriter, r *http.Request, targe
if err != nil {
http.ServeFile(w, r, "./web/rperror.html")
h.Parent.Option.Logger.PrintAndLog("proxy", "Failed to assign an upstream for this request", err)
h.Parent.logRequest(r, false, 521, "subdomain-http", r.URL.Hostname(), r.Host)
h.Parent.logRequest(r, false, 521, "subdomain-http", r.URL.Hostname(), r.Host, target)
return
}

/* Upstream Host Swap (use to detect loopback to self) */
if h.upstreamHostSwap(w, r, selectedUpstream) {
if h.upstreamHostSwap(w, r, selectedUpstream, target) {
//Request handled by the loopback handler
return
}
@@ -170,7 +187,7 @@
if selectedUpstream.RequireTLS {
u, _ = url.Parse("wss://" + wsRedirectionEndpoint + requestURL)
}
h.Parent.logRequest(r, true, 101, "host-websocket", reqHostname, selectedUpstream.OriginIpOrDomain)
h.Parent.logRequest(r, true, 101, "host-websocket", reqHostname, selectedUpstream.OriginIpOrDomain, target)

if target.HeaderRewriteRules == nil {
target.HeaderRewriteRules = GetDefaultHeaderRewriteRules()
@@ -232,18 +249,18 @@
if err != nil {
if errors.As(err, &dnsError) {
http.ServeFile(w, r, "./web/hosterror.html")
h.Parent.logRequest(r, false, 404, "host-http", reqHostname, upstreamHostname)
h.Parent.logRequest(r, false, 404, "host-http", reqHostname, upstreamHostname, target)
} else if errors.Is(err, context.Canceled) {
//Request canceled by client, usually due to manual refresh before page load
http.Error(w, "Request canceled", http.StatusRequestTimeout)
h.Parent.logRequest(r, false, http.StatusRequestTimeout, "host-http", reqHostname, upstreamHostname)
h.Parent.logRequest(r, false, http.StatusRequestTimeout, "host-http", reqHostname, upstreamHostname, target)
} else {
http.ServeFile(w, r, "./web/rperror.html")
h.Parent.logRequest(r, false, 521, "host-http", reqHostname, upstreamHostname)
h.Parent.logRequest(r, false, 521, "host-http", reqHostname, upstreamHostname, target)
}
}

h.Parent.logRequest(r, true, statusCode, "host-http", reqHostname, upstreamHostname)
h.Parent.logRequest(r, true, statusCode, "host-http", reqHostname, upstreamHostname, target)
}

// Handle vdir type request
@@ -269,7 +286,7 @@ func (h *ProxyHandler) vdirRequest(w http.ResponseWriter, r *http.Request, targe
target.parent.HeaderRewriteRules = GetDefaultHeaderRewriteRules()
}

h.Parent.logRequest(r, true, 101, "vdir-websocket", r.Host, target.Domain)
h.Parent.logRequest(r, true, 101, "vdir-websocket", r.Host, target.Domain, target.parent)
wspHandler := websocketproxy.NewProxy(u, websocketproxy.Options{
SkipTLSValidation: target.SkipCertValidations,
SkipOriginCheck: true, //You should not use websocket via virtual directory. But keep this to true for compatibility
@@ -325,19 +342,25 @@
if errors.As(err, &dnsError) {
http.ServeFile(w, r, "./web/hosterror.html")
log.Println(err.Error())
h.Parent.logRequest(r, false, 404, "vdir-http", reqHostname, target.Domain)
h.Parent.logRequest(r, false, 404, "vdir-http", reqHostname, target.Domain, target.parent)
} else {
http.ServeFile(w, r, "./web/rperror.html")
log.Println(err.Error())
h.Parent.logRequest(r, false, 521, "vdir-http", reqHostname, target.Domain)
h.Parent.logRequest(r, false, 521, "vdir-http", reqHostname, target.Domain, target.parent)
}
}
h.Parent.logRequest(r, true, statusCode, "vdir-http", reqHostname, target.Domain)
h.Parent.logRequest(r, true, statusCode, "vdir-http", reqHostname, target.Domain, target.parent)

}

// This logger collect data for the statistical analysis. For log to file logger, check the Logger and LogHTTPRequest handler
func (router *Router) logRequest(r *http.Request, succ bool, statusCode int, forwardType string, originalHostname string, upstreamHostname string) {
func (router *Router) logRequest(r *http.Request, succ bool, statusCode int, forwardType string, originalHostname string, upstreamHostname string, endpoint *ProxyEndpoint) {
if endpoint != nil && endpoint.DisableLogging {
// Notes: endpoint can be nil if the request has been handled before a host name can be resolved
// e.g. Redirection matching rule
// in that case we will log it by default and will not enter this routine
return
}
if router.Option.StatisticCollector != nil {
go func() {
requestInfo := statistic.RequestInfo{
@@ -51,7 +51,7 @@ func (t *RequestCountPerIpTable) Clear() {
func (h *ProxyHandler) handleRateLimitRouting(w http.ResponseWriter, r *http.Request, pe *ProxyEndpoint) error {
err := h.Parent.handleRateLimit(w, r, pe)
if err != nil {
h.Parent.logRequest(r, false, 429, "ratelimit", r.URL.Hostname(), "")
h.Parent.logRequest(r, false, 429, "ratelimit", r.URL.Hostname(), "", pe)
}
return err
}
@@ -208,6 +208,7 @@ type ProxyEndpoint struct {

//Uptime Monitor
DisableUptimeMonitor bool //Disable uptime monitor for this endpoint
DisableLogging bool //Disable logging of reverse proxy requests

// Chunked Transfer Encoding
DisableChunkedTransferEncoding bool //Disable chunked transfer encoding for this endpoint
src/mod/geodb/geoipv4.csv (113381 lines): file diff suppressed because it is too large
src/mod/info/logger/handle.go (new file, 164 lines)
@@ -0,0 +1,164 @@
package logger

import (
"encoding/json"
"fmt"
"net/http"
"os"
"path/filepath"

"imuslab.com/zoraxy/mod/utils"
)

// LogConfig represents the log rotation configuration
type LogConfig struct {
Enabled bool `json:"enabled"` // Whether log rotation is enabled
MaxSize string `json:"maxSize"` // Maximum size as string (e.g., "200M", "10K")
MaxBackups int `json:"maxBackups"` // Maximum number of backup files to keep
Compress bool `json:"compress"` // Whether to compress rotated logs
}

// LoadLogConfig loads the log configuration from the config file
func LoadLogConfig(configPath string) (*LogConfig, error) {
// Ensure config directory exists
configDir := filepath.Dir(configPath)
if err := os.MkdirAll(configDir, 0755); err != nil {
return nil, err
}

// Default config
defaultConfig := &LogConfig{
Enabled: false,
MaxSize: "0",
MaxBackups: 16,
Compress: true,
}

// Try to read existing config
file, err := os.Open(configPath)
if err != nil {
if os.IsNotExist(err) {
// File doesn't exist, save default config
if saveErr := SaveLogConfig(configPath, defaultConfig); saveErr != nil {
return nil, saveErr
}
return defaultConfig, nil
}
return nil, err
}
defer file.Close()

var config LogConfig
decoder := json.NewDecoder(file)
if err := decoder.Decode(&config); err != nil {
// If decode fails, use default
return defaultConfig, nil
}

return &config, nil
}

// SaveLogConfig saves the log configuration to the config file
func SaveLogConfig(configPath string, config *LogConfig) error {
// Ensure config directory exists
configDir := filepath.Dir(configPath)
if err := os.MkdirAll(configDir, 0755); err != nil {
return err
}

file, err := os.Create(configPath)
if err != nil {
return err
}
defer file.Close()

encoder := json.NewEncoder(file)
encoder.SetIndent("", " ")
return encoder.Encode(config)
}

// ApplyLogConfig applies the log configuration to the logger
func (l *Logger) ApplyLogConfig(config *LogConfig) error {
maxSizeBytes, err := utils.SizeStringToBytes(config.MaxSize)
if err != nil {
return err
}

if maxSizeBytes == 0 {
// Use default value of 25MB
maxSizeBytes = 25 * 1024 * 1024
}

rotateOption := &RotateOption{
Enabled: config.Enabled,
MaxSize: int64(maxSizeBytes),
MaxBackups: config.MaxBackups,
Compress: config.Compress,
BackupDir: "",
}

l.SetRotateOption(rotateOption)
return nil
}

// HandleGetLogConfig handles GET /api/logger/config
func HandleGetLogConfig(configPath string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
config, err := LoadLogConfig(configPath)
if err != nil {
utils.SendErrorResponse(w, "Failed to load log config: "+err.Error())
return
}
js, err := json.Marshal(config)
if err != nil {
utils.SendErrorResponse(w, "Failed to marshal config: "+err.Error())
return
}
utils.SendJSONResponse(w, string(js))
}
}

// HandleUpdateLogConfig handles POST /api/logger/config
func HandleUpdateLogConfig(configPath string, logger *Logger) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
utils.SendErrorResponse(w, "Method not allowed")
return
}

var config LogConfig
if err := json.NewDecoder(r.Body).Decode(&config); err != nil {
utils.SendErrorResponse(w, "Invalid JSON: "+err.Error())
return
}

// Validate MaxSize
if _, err := utils.SizeStringToBytes(config.MaxSize); err != nil {
utils.SendErrorResponse(w, "Invalid maxSize: "+err.Error())
return
}

// Validate MaxBackups
if config.MaxBackups < 1 {
utils.SendErrorResponse(w, "maxBackups must be at least 1")
return
}

// Save config
if err := SaveLogConfig(configPath, &config); err != nil {
utils.SendErrorResponse(w, "Failed to save config: "+err.Error())
return
}

// Apply to logger
if err := logger.ApplyLogConfig(&config); err != nil {
utils.SendErrorResponse(w, "Failed to apply config: "+err.Error())
return
}

// Pretty print config as key: value pairs
configStr := fmt.Sprintf("enabled=%t, maxSize=%s, maxBackups=%d, compress=%t", config.Enabled, config.MaxSize, config.MaxBackups, config.Compress)
logger.PrintAndLog("logger", "Updated log rotation setting: "+configStr, nil)
utils.SendOK(w)
}
}
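Judging from the json tags on LogConfig and the decoder in HandleUpdateLogConfig, the same JSON shape is used both for the log_conf.json file written to CONF_LOG_CONFIG and for the body posted to /api/logger/config. A rough illustrative payload (the values here are examples, not repository defaults, except maxBackups which defaults to 16) might look like:

```json
{
  "enabled": true,
  "maxSize": "200M",
  "maxBackups": 16,
  "compress": true
}
```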
@@ -1,7 +1,7 @@
package logger

import (
"archive/zip"
"compress/gzip"
"fmt"
"io"
"net/http"
@@ -176,17 +176,17 @@ func (l *Logger) RotateLog() error {
return nil
}

// compressFile compresses the given file using zip format and creates a .gz file.
// compressFile compresses the given file using gzip format and creates a .gz file.
func compressFile(filename string) error {
zipFilename := filename + ".gz"
outFile, err := os.Create(zipFilename)
gzipFilename := filename + ".gz"
outFile, err := os.Create(gzipFilename)
if err != nil {
return err
}
defer outFile.Close()

zipWriter := zip.NewWriter(outFile)
defer zipWriter.Close()
gzipWriter := gzip.NewWriter(outFile)
defer gzipWriter.Close()

fileToCompress, err := os.Open(filename)
if err != nil {
@@ -194,11 +194,6 @@ func compressFile(filename string) error {
}
defer fileToCompress.Close()

w, err := zipWriter.Create(filepath.Base(filename))
if err != nil {
return err
}

_, err = io.Copy(w, fileToCompress)
_, err = io.Copy(gzipWriter, fileToCompress)
return err
}
@@ -1,8 +1,11 @@
package logviewer

import (
"archive/zip"
"compress/gzip"
"encoding/json"
"errors"
"io"
"io/fs"
"net/http"
"os"
@@ -15,7 +18,6 @@ import (

type ViewerOption struct {
RootFolder string //The root folder to scan for log
Extension string //The extension the root files use, include the . in your ext (e.g. .log)
}

type Viewer struct {
@@ -72,6 +74,11 @@ func (v *Viewer) HandleReadLog(w http.ResponseWriter, r *http.Request) {
filter = ""
}

linesParam, err := utils.GetPara(r, "lines")
if err != nil {
linesParam = "all"
}

content, err := v.LoadLogFile(strings.TrimSpace(filepath.Base(filename)))
if err != nil {
utils.SendErrorResponse(w, err.Error())
@@ -107,6 +114,18 @@
content = strings.Join(filteredLines, "\n")
}

// Apply lines limit after filtering
if linesParam != "all" {
if lineLimit, err := strconv.Atoi(linesParam); err == nil && lineLimit > 0 {
allLines := strings.Split(content, "\n")
if len(allLines) > lineLimit {
// Keep only the last lineLimit lines
allLines = allLines[len(allLines)-lineLimit:]
content = strings.Join(allLines, "\n")
}
}
}

utils.SendTextResponse(w, content)
}

@@ -158,7 +177,7 @@ func (v *Viewer) HandleLogErrorSummary(w http.ResponseWriter, r *http.Request) {
line = line[strings.LastIndex(line, "]")+1:]
fields := strings.Fields(strings.TrimSpace(line))

if len(fields) > 0 {
if len(fields) >= 3 {
statusStr := fields[2]
if len(statusStr) == 3 && (statusStr[0] != '1' && statusStr[0] != '2' && statusStr[0] != '3') {
fieldsWithTimestamp := append([]string{timestamp}, strings.Fields(strings.TrimSpace(line))...)
@@ -179,7 +198,7 @@
func (v *Viewer) ListLogFiles(showFullpath bool) map[string][]*LogFile {
result := map[string][]*LogFile{}
filepath.WalkDir(v.option.RootFolder, func(path string, di fs.DirEntry, err error) error {
if filepath.Ext(path) == v.option.Extension {
if filepath.Ext(path) == ".log" || strings.HasSuffix(path, ".log.gz") {
catergory := filepath.Base(filepath.Dir(path))
logList, ok := result[catergory]
if !ok {
@@ -197,9 +216,12 @@
return nil
}

filename := filepath.Base(path)
filename = strings.TrimSuffix(filename, ".log") //to handle cases where the filename ends of .log.gz

logList = append(logList, &LogFile{
Title: strings.TrimSuffix(filepath.Base(path), filepath.Ext(path)),
Filename: filepath.Base(path),
Filename: filename,
Fullpath: fullpath,
Filesize: st.Size(),
})
@@ -212,13 +234,78 @@
return result
}

func (v *Viewer) LoadLogFile(filename string) (string, error) {
// readLogFileContent reads a log file, handling both compressed (.gz) and uncompressed files
func (v *Viewer) readLogFileContent(filepath string) ([]byte, error) {
file, err := os.Open(filepath)
if err != nil {
return nil, err
}
defer file.Close()

// Check if file is compressed
if strings.HasSuffix(filepath, ".gz") {
gzipReader, err := gzip.NewReader(file)
if err != nil {
// Try zip reader for older logs that use zip compression despite .gz extension
zipReader, err := zip.OpenReader(filepath)
if err != nil {
return nil, err
}
defer zipReader.Close()
if len(zipReader.File) == 0 {
return nil, errors.New("zip file is empty")
}
zipFile := zipReader.File[0]
rc, err := zipFile.Open()
if err != nil {
return nil, err
}
defer rc.Close()
return io.ReadAll(rc)

}
defer gzipReader.Close()

return io.ReadAll(gzipReader)
}

// Regular file
return io.ReadAll(file)
}

func (v *Viewer) senatizeLogFilenameInput(filename string) string {
filename = strings.TrimSuffix(filename, ".log.gz")
filename = strings.TrimSuffix(filename, ".log")
filename = filepath.ToSlash(filename)
filename = strings.ReplaceAll(filename, "../", "")
logFilepath := filepath.Join(v.option.RootFolder, filename)
//Check if .log.gz or .log exists
if utils.FileExists(filepath.Join(v.option.RootFolder, filename+".log")) {
return filepath.Join(v.option.RootFolder, filename+".log")
}
if utils.FileExists(filepath.Join(v.option.RootFolder, filename+".log.gz")) {
return filepath.Join(v.option.RootFolder, filename+".log.gz")
}
return filepath.Join(v.option.RootFolder, filename)
}

func (v *Viewer) LoadLogFile(filename string) (string, error) {
// filename might be in (no extension), .log or .log.gz format
// so we trim those first before proceeding
logFilepath := v.senatizeLogFilenameInput(filename)
if utils.FileExists(logFilepath) {
//Load it
content, err := os.ReadFile(logFilepath)
content, err := v.readLogFileContent(logFilepath)
if err != nil {
return "", err
}

return string(content), nil
}

//Also check .log.gz
logFilepathGz := logFilepath + ".gz"
if utils.FileExists(logFilepathGz) {
content, err := v.readLogFileContent(logFilepathGz)
if err != nil {
return "", err
}
@@ -230,12 +317,10 @@
}

func (v *Viewer) LoadLogSummary(filename string) (string, error) {
filename = filepath.ToSlash(filename)
filename = strings.ReplaceAll(filename, "../", "")
logFilepath := filepath.Join(v.option.RootFolder, filename)
logFilepath := v.senatizeLogFilenameInput(filename)
if utils.FileExists(logFilepath) {
//Load it
content, err := os.ReadFile(logFilepath)
content, err := v.readLogFileContent(logFilepath)
if err != nil {
return "", err
}
@@ -84,18 +84,30 @@ func (m *Manager) SetCertAsDefault(w http.ResponseWriter, r *http.Request) {
}

//Check if the previous default cert exists. If yes, get its hostname from cert contents
defaultPubKey := filepath.Join(m.CertStore, "default.key")
defaultPriKey := filepath.Join(m.CertStore, "default.pem")
defaultPubKey := filepath.Join(m.CertStore, "default.pem")
defaultPriKey := filepath.Join(m.CertStore, "default.key")
defaultJSON := filepath.Join(m.CertStore, "default.json")

fmt.Println(defaultPubKey, defaultPriKey, defaultJSON)
if utils.FileExists(defaultPubKey) && utils.FileExists(defaultPriKey) {
//Move the existing default cert to its original name
certBytes, err := os.ReadFile(defaultPriKey)
certBytes, err := os.ReadFile(defaultPubKey)
if err == nil {
block, _ := pem.Decode(certBytes)
if block != nil {
cert, err := x509.ParseCertificate(block.Bytes)
if err == nil {
os.Rename(defaultPubKey, filepath.Join(m.CertStore, domainToFilename(cert.Subject.CommonName, "key")))
os.Rename(defaultPriKey, filepath.Join(m.CertStore, domainToFilename(cert.Subject.CommonName, "pem")))
originalKeyName := filepath.Join(m.CertStore, domainToFilename(cert.Subject.CommonName, "key"))
originalPemName := filepath.Join(m.CertStore, domainToFilename(cert.Subject.CommonName, "pem"))
originalJSONName := filepath.Join(m.CertStore, domainToFilename(cert.Subject.CommonName, "json"))

fmt.Println(defaultPubKey, originalPemName)
os.Rename(defaultPubKey, originalPemName)
fmt.Println(defaultPriKey, originalKeyName)
os.Rename(defaultPriKey, originalKeyName)
if utils.FileExists(defaultJSON) {
os.Rename(defaultJSON, originalJSONName)
}
}
}
}
@@ -103,11 +115,15 @@

//Check if the cert exists
certname = filepath.Base(certname) //prevent path escape
pubKey := filepath.Join(filepath.Join(m.CertStore), certname+".key")
priKey := filepath.Join(filepath.Join(m.CertStore), certname+".pem")
pubKey := filepath.Join(filepath.Join(m.CertStore), certname+".pem")
priKey := filepath.Join(filepath.Join(m.CertStore), certname+".key")
certJSON := filepath.Join(filepath.Join(m.CertStore), certname+".json")
if utils.FileExists(pubKey) && utils.FileExists(priKey) {
os.Rename(pubKey, filepath.Join(m.CertStore, "default.key"))
os.Rename(priKey, filepath.Join(m.CertStore, "default.pem"))
os.Rename(pubKey, filepath.Join(m.CertStore, "default.pem"))
os.Rename(priKey, filepath.Join(m.CertStore, "default.key"))
if utils.FileExists(certJSON) {
os.Rename(certJSON, filepath.Join(m.CertStore, "default.json"))
}
utils.SendOK(w)

//Update cert list
@@ -59,7 +59,7 @@ func (m *Monitor) ExecuteUptimeCheck() {
//For each target to check online, do the following
var thisRecord Record
if target.Protocol == "http" || target.Protocol == "https" {
online, laterncy, statusCode := m.getWebsiteStatusWithLatency(target.URL)
online, laterncy, statusCode := m.getWebsiteStatusWithLatency(target)
thisRecord = Record{
Timestamp: time.Now().Unix(),
ID: target.ID,
@@ -167,30 +167,40 @@ func (m *Monitor) HandleUptimeLogRead(w http.ResponseWriter, r *http.Request) {
*/

// Get website stauts with latency given URL, return is conn succ and its latency and status code
func (m *Monitor) getWebsiteStatusWithLatency(url string) (bool, int64, int) {
func (m *Monitor) getWebsiteStatusWithLatency(target *Target) (bool, int64, int) {
start := time.Now().UnixNano() / int64(time.Millisecond)
statusCode, err := getWebsiteStatus(url)
statusCode, err := getWebsiteStatus(target.URL)
end := time.Now().UnixNano() / int64(time.Millisecond)
if err != nil {
m.Config.Logger.PrintAndLog(logModuleName, "Ping upstream timeout. Assume offline", err)
m.Config.OnlineStateNotify(url, false)
return false, 0, 0
} else {
diff := end - start
succ := false
if statusCode >= 200 && statusCode < 300 {
//OK
succ = true
} else if statusCode >= 300 && statusCode < 400 {
//Redirection code
succ = true
} else {
succ = false

// Check if this is the first record
// sometime after startup the first check may fail due to network issues
// we will log it as failed but not notify dynamic proxy to take down the upstream
records, ok := m.OnlineStatusLog[target.ID]
if !ok || len(records) == 0 {
return false, 0, 0
}
m.Config.OnlineStateNotify(url, true)
return succ, diff, statusCode

// Otherwise assume offline
m.Config.OnlineStateNotify(target.URL, false)
return false, 0, 0
}

diff := end - start
succ := false
if statusCode >= 200 && statusCode < 300 {
//OK
succ = true
} else if statusCode >= 300 && statusCode < 400 {
//Redirection code
succ = true
} else {
succ = false
}
m.Config.OnlineStateNotify(target.URL, true)
return succ, diff, statusCode

}

func getWebsiteStatus(url string) (int, error) {
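The second hunk above reworks getWebsiteStatusWithLatency so that a failed probe only triggers the offline notification when at least one earlier record exists for that target, because the very first check after startup may fail for transient network reasons. A condensed sketch of that guard, with Record and the status log reduced to stand-in types (not the module's real definitions):

```go
package main

import "fmt"

// Record is a stand-in for the monitor's per-check record type.
type Record struct {
	Timestamp int64
	ID        string
	Online    bool
}

// shouldNotifyOffline mirrors the guard added above: only propagate an
// offline state when a previous record exists for this target.
func shouldNotifyOffline(statusLog map[string][]Record, targetID string) bool {
	records, ok := statusLog[targetID]
	return ok && len(records) > 0
}

func main() {
	log := map[string][]Record{}
	fmt.Println(shouldNotifyOffline(log, "web-01")) // false: first check, do not take down upstream
	log["web-01"] = append(log["web-01"], Record{Timestamp: 1700000000, ID: "web-01", Online: true})
	fmt.Println(shouldNotifyOffline(log, "web-01")) // true: later failures are reported as usual
}
```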
@@ -81,6 +81,26 @@ func PostPara(r *http.Request, key string) (string, error) {
return x, nil
}

// Get POST parameter as time.Duration
func PostDuration(r *http.Request, key string) (*time.Duration, error) {
// Try to parse the form
if err := r.ParseForm(); err != nil {
return nil, err
}
// Get first value from the form
x := r.Form.Get(key)
if len(x) == 0 {
return nil, errors.New("invalid " + key + " given")
}

duration, err := time.ParseDuration(x)

if err != nil {
return nil, errors.Join(errors.New("invalid "+key+" duration"), err)
}
return &duration, nil
}

// Get POST paramter as boolean, accept 1 or true
func PostBool(r *http.Request, key string) (bool, error) {
x, err := PostPara(r, key)
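The new PostDuration helper returns a *time.Duration parsed from a form field. Below is a self-contained sketch of the same pattern on the caller side; the field name `cacheTime` and the 60-second fallback are illustrative choices (the fallback mirrors the "Defaults to 60s" hint on the OAuth2 form later in this diff), not code taken from Zoraxy.

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
	"time"
)

// durationFromForm shows how a handler might consume a duration-typed POST
// parameter such as "1m", "10m" or "1h", falling back to a default when the
// field is absent.
func durationFromForm(r *http.Request, key string, fallback time.Duration) (time.Duration, error) {
	if err := r.ParseForm(); err != nil {
		return 0, err
	}
	v := r.Form.Get(key)
	if v == "" {
		return fallback, nil
	}
	return time.ParseDuration(v)
}

func main() {
	form := url.Values{"cacheTime": {"10m"}}
	r, _ := http.NewRequest(http.MethodPost, "/example", strings.NewReader(form.Encode()))
	r.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	d, err := durationFromForm(r, "cacheTime", 60*time.Second)
	fmt.Println(d, err) // 10m0s <nil>
}
```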
@@ -244,6 +244,9 @@ func ReverseProxyHandleAddEndpoint(w http.ResponseWriter, r *http.Request) {
enableUtm = true
}

// Disable logging?
disableLog, _ := utils.PostBool(r, "disableLog")

useBypassGlobalTLS := bypassGlobalTLS == "true"

//Enable TLS validation?
@@ -416,6 +419,7 @@ func ReverseProxyHandleAddEndpoint(w http.ResponseWriter, r *http.Request) {

Tags: tags,
DisableUptimeMonitor: !enableUtm,
DisableLogging: disableLog,
}

preparedEndpoint, err := dynamicProxyRouter.PrepareProxyRoute(&thisProxyEndpoint)
@@ -570,6 +574,9 @@ func ReverseProxyHandleEditEndpoint(w http.ResponseWriter, r *http.Request) {
// Disable chunked Encoding
disableChunkedEncoding, _ := utils.PostBool(r, "dChunkedEnc")

// Disable logging
disableLogging, _ := utils.PostBool(r, "dLogging")

//Load the previous basic auth credentials from current proxy rules
targetProxyEntry, err := dynamicProxyRouter.LoadProxy(rootNameOrMatchingDomain)
if err != nil {
@@ -611,6 +618,7 @@ func ReverseProxyHandleEditEndpoint(w http.ResponseWriter, r *http.Request) {
newProxyEndpoint.UseStickySession = useStickySession
newProxyEndpoint.DisableUptimeMonitor = disbleUtm
newProxyEndpoint.DisableChunkedTransferEncoding = disableChunkedEncoding
newProxyEndpoint.DisableLogging = disableLogging
newProxyEndpoint.Tags = tags

//Prepare to replace the current routing rule
src/start.go (30 changed lines)
@@ -12,7 +12,6 @@ import (

"imuslab.com/zoraxy/mod/auth/sso/oauth2"
"imuslab.com/zoraxy/mod/eventsystem"
"imuslab.com/zoraxy/mod/utils"

"github.com/gorilla/csrf"
"imuslab.com/zoraxy/mod/access"
@@ -77,30 +76,33 @@ func startupSequence() {
SystemWideLogger = l
SystemWideLogger.Println("System wide logging is disabled, all logs will be printed to STDOUT only")
} else {
logRotateSize, err := utils.SizeStringToBytes(*logRotate)
// Load log configuration from file
logConfig, err := logger.LoadLogConfig(CONF_LOG_CONFIG)
if err != nil {
//Default disable
logRotateSize = 0
SystemWideLogger.Println("Failed to load log config, using defaults: " + err.Error())
logConfig = &logger.LogConfig{
Enabled: false,
MaxSize: "0",
Compress: true,
}
}
l.SetRotateOption(&logger.RotateOption{
Enabled: logRotateSize != 0,
MaxSize: int64(logRotateSize),
MaxBackups: 10,
Compress: *enableLogCompression,
BackupDir: "",
})

// Apply the configuration
if err := l.ApplyLogConfig(logConfig); err != nil {
SystemWideLogger.Println("Failed to apply log config: " + err.Error())
}

SystemWideLogger = l
if logRotateSize == 0 {
if !logConfig.Enabled {
SystemWideLogger.Println("Log rotation is disabled")
} else {
SystemWideLogger.Println("Log rotation is enabled, max log file size " + utils.BytesToHumanReadable(int64(logRotateSize)))
SystemWideLogger.Println("Log rotation is enabled, max log file size " + logConfig.MaxSize)
}
SystemWideLogger.Println("System wide logging is enabled")
}

LogViewer = logviewer.NewLogViewer(&logviewer.ViewerOption{
RootFolder: *path_logFile,
Extension: LOG_EXTENSION,
})

//Create database
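In the startupSequence hunk above, the flag-driven rotation setup is replaced by a LogConfig loaded from the config file, with an in-code fallback when the file is missing. A sketch of that fallback shape follows; the JSON tags and the MaxBackups field are assumptions inferred from the keys the web UI submits (enabled, maxSize, maxBackups, compress) and may not match the module's actual struct, and the file path shown is illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// LogConfig is a stand-in mirroring the fields visible in the hunk above;
// MaxBackups is included because the settings form sends it (an assumption).
type LogConfig struct {
	Enabled    bool   `json:"enabled"`
	MaxSize    string `json:"maxSize"`
	MaxBackups int    `json:"maxBackups"`
	Compress   bool   `json:"compress"`
}

// loadLogConfigOrDefault reads the config file and falls back to the same
// defaults the startup code uses when the file cannot be loaded.
func loadLogConfigOrDefault(path string) *LogConfig {
	fallback := &LogConfig{Enabled: false, MaxSize: "0", Compress: true}
	data, err := os.ReadFile(path)
	if err != nil {
		return fallback
	}
	cfg := &LogConfig{}
	if err := json.Unmarshal(data, cfg); err != nil {
		return fallback
	}
	return cfg
}

func main() {
	fmt.Printf("%+v\n", loadLogConfigOrDefault("./conf/log_conf.json"))
}
```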
@@ -1568,6 +1568,15 @@
var access_ip_country_map = {};

function initIpAccessTable(ipAccessCounts){
// Filter out IPs with less than 100 requests
var filteredCounts = {};
for (var ip in ipAccessCounts) {
if (ipAccessCounts[ip] >= 100) {
filteredCounts[ip] = ipAccessCounts[ip];
}
}
ipAccessCounts = filteredCounts;
blacklist_currentPage = 1; // Reset to first page
blacklist_totalPages = Math.ceil(Object.keys(ipAccessCounts).length / blacklist_entriesPerPage);

function sortkv(obj) {
@@ -284,6 +284,12 @@
<label>Allow plain HTTP access<br>
<small>Allow inbound connections without TLS/SSL</small></label>
</div>
<br>
<div class="ui checkbox" style="margin-top: 0.4em;">
<input type="checkbox" class="DisableLogging">
<label>Disable Requests Logging<br>
<small>Disable logging for all incoming requests for this hostname</small></label>
</div>
</div>

</div>
@@ -885,6 +891,7 @@
let rateLimit = $(editor).find(".RateLimit").val();
let bypassGlobalTLS = $(editor).find(".BypassGlobalTLS")[0].checked;
let disableChunkedTransferEncoding = $(editor).find(".DisableChunkedTransferEncoding")[0].checked;
let disableLogging = $(editor).find(".DisableLogging")[0].checked;
let tags = getTagsArrayFromEndpoint(uuid);
if (tags.length > 0){
tags = tags.join(",");
@@ -901,6 +908,7 @@
"authprovider" :authProviderType,
"rate" :requireRateLimit,
"dChunkedEnc": disableChunkedTransferEncoding,
"dLogging": disableLogging,
"ratenum" :rateLimit,
"tags": tags,
};
@@ -1239,6 +1247,12 @@
saveProxyInlineEdit(uuid);
});

editor.find(".DisableLogging").off('change');
editor.find(".DisableLogging").prop("checked", subd.DisableLogging);
editor.find(".DisableLogging").on("change", function() {
saveProxyInlineEdit(uuid);
});

//Bind the edit button
editor.find(".downstream_primary_hostname_edit_btn").off("click").on("click", function(){
editor.find(".downstream_primary_hostname_edit_btn").parent().hide();
@@ -63,13 +63,20 @@
<label>Sticky Session<br><small>Enable stick session on upstream load balancing</small></label>
</div>
</div>
<div class="field">
<div class="field">
<div class="ui checkbox">
<input type="checkbox" id="enableUtm" checked>
<label>Enable uptime monitor<br><small>Automatically check upstream status and switch to another if offline</small>
</label>
</div>
</div>
<div class="field">
<div class="ui checkbox">
<input type="checkbox" id="disableLog">
<label>Disable Requests Logging<br><small>Disable requests logging for this host, recommended for high traffic sites</small>
</label>
</div>
</div>
<div class="field">
<label>Tags</label>
<input type="text" id="proxyTags" placeholder="e.g. mediaserver, management">
@@ -268,6 +275,7 @@
let useStickySessionLB = $("#useStickySessionLB")[0].checked;
let tags = $("#proxyTags").val().trim();
let enableUtm = $("#enableUtm")[0].checked;
let disableLog = $("#disableLog")[0].checked;

if (rootname.trim() == ""){
$("#rootname").parent().addClass("error");
@@ -302,7 +310,8 @@
access: accessRuleToUse,
stickysess: useStickySessionLB,
tags: tags,
enableUtm: enableUtm,
enableUtm: enableUtm,
disableLog: disableLog,
},
success: function(data){
if (data.error != undefined){
@@ -109,6 +109,26 @@
<input type="password" id="oauth2ClientSecret" name="oauth2ClientSecret" placeholder="Enter Client Secret">
<small>Secret key of the OAuth2 application</small>
</div>
<div class="field">
<label for="oauth2CodeChallengeMethod">Code Challenge Method</label>

<div class="ui selection dropdown" id="oauth2CodeChallengeMethod">
<input type="hidden" name="oauth2CodeChallengeMethod">
<i class="dropdown icon"></i>
<div class="default text">Plain</div>
<div class="menu">
<div class="item" data-value="plain">Plain</div>
<div class="item" data-value="PKCE">PKCE</div>
<div class="item" data-value="PKCE_S256">PKCE (S256)</div>
</div>
</div>
<small>Options: <br>
Plain: No code challenge is used.<br>
PKCE: Uses a code challenge for added security.<br>
PKCE (S256): Uses a hashed code challenge (SHA-256) for maximum protection.<br>
<strong>Note:</strong> PKCE (especially S256) is recommended for better security.
</small>
</div>
<div class="field">
<label for="oauth2WellKnownUrl">Discovery URL</label>
<input type="text" id="oauth2WellKnownUrl" name="oauth2WellKnownUrl" placeholder="Enter Well-Known URL">
@@ -138,6 +158,13 @@
<input type="text" id="oauth2Scopes" name="oauth2Scopes" placeholder="Enter Scopes">
<small>Scopes required by the OAuth2 provider to retrieve information about the authenticated user. Refer to your OAuth2 provider documentation for more information about this. Optional if Well-Known url is configured.</small>
</div>

<div class="field">
<label for="oauth2ConfigurationCacheTime">Configuration cache time</label>
<input type="text" id="oauth2ConfigurationCacheTime" name="oauth2ConfigurationCacheTime" placeholder="Enter Configuration Cache Time">
<small>Time to cache OAuth2 configuration before refresh. Accepts Go time.Duration format (e.g. 1m, 10m, 1h). Defaults to 60s.</small>
</div>

<button class="ui basic button" type="submit"><i class="green check icon"></i> Apply Change</button>
<button class="ui basic button" type="button" id="oauth2Clear"><i class="red trash icon"></i> Clear</button>
</form>
@@ -299,6 +326,8 @@
$('#oauth2ClientId').val(data.oauth2ClientId);
$('#oauth2ClientSecret').val(data.oauth2ClientSecret);
$('#oauth2Scopes').val(data.oauth2Scopes);
$('#oauth2ConfigurationCacheTime').val(data.oauth2ConfigurationCacheTime);
$('[data-value="'+data.oauth2CodeChallengeMethod+'"]').click();
},
error: function(jqXHR, textStatus, errorThrown) {
console.error('Error fetching SSO settings:', textStatus, errorThrown);
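The new Code Challenge Method dropdown exposes Plain, PKCE and PKCE (S256). As background (general RFC 7636 behaviour, not code from this diff): with the S256 method the client sends base64url(SHA-256(verifier)) as the code challenge and later proves possession of the original verifier at token exchange. A minimal illustration:

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

// s256Challenge derives a PKCE code challenge from a code verifier using the
// S256 method: BASE64URL-ENCODE(SHA256(ASCII(verifier))), without padding.
func s256Challenge(verifier string) string {
	sum := sha256.Sum256([]byte(verifier))
	return base64.RawURLEncoding.EncodeToString(sum[:])
}

func main() {
	verifier := "dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk" // example verifier from RFC 7636
	fmt.Println(s256Challenge(verifier))                      // E9Melhoa2OwvFrEMTJguCHaoeK1t8URWbuGJSstw-cM
}
```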
@@ -5,8 +5,9 @@
</div>
<div class="ui top attached tabular menu">
<a class="utils item active" data-tab="utiltab1"><i class="ui user circle blue icon"></i> Accounts</a>
<a class="utils item" data-tab="utiltab2">Toolbox</a>
<a class="utils item" data-tab="utiltab3">System</a>
<a class="utils item" data-tab="utiltab2"><i class="ui grey file alternate outline icon"></i> Logger</a>
<a class="utils item" data-tab="utiltab3">Toolbox</a>
<a class="utils item" data-tab="utiltab4">System</a>
</div>

<div class="ui bottom attached tab segment utilitiesTabs active" data-tab="utiltab1">
@@ -88,6 +89,60 @@
</div>
</div>
<div class="ui bottom attached tab segment utilitiesTabs" data-tab="utiltab2">
<!-- Log Settings -->
<h3>Log Settings</h3>
<p>Configure log rotation settings for the system logger</p>
<div class="ui basic segment">
<form id="log-settings-form" class="ui form">
<div class="field">
<div class="ui toggle checkbox">
<input type="checkbox" id="logRotationEnabled" name="enabled">
<label>Enable Log Rotation</label>
</div>
</div>
<div class="field">
<label>Maximum Log File Size</label>
<div class="ui right labeled input">
<input type="text" id="logMaxSize" name="maxSize" placeholder="e.g. 200M, 10K, 500" value="0">
<div class="ui basic label">bytes</div>
</div>
<small>Enter size with suffix (K, M, G) or plain number. Set to 0 to disable.</small>
</div>
<div class="field">
<label>Maximum Backup Files</label>
<input type="number" id="logMaxBackups" name="maxBackups" placeholder="10" min="1" value="16">
<small>Maximum number of rotated log files to keep.</small>
</div>
<div class="field">
<div class="ui checkbox">
<input type="checkbox" id="logCompressionEnabled" name="compress">
<label>Enable Log Compression</label>
</div>
<small>When enabled, rotated log files will be compressed using ZIP format.</small>
</div>
<button class="ui basic button" type="submit">
<i class="green save icon"></i> Save Settings
</button>
<button class="ui basic button" id="rotateLogBtn" onclick="triggerLogRotation(event)">
<i class="yellow archive icon" ></i> Rotate Log Now
</button>
<div id="logSettingsSuccessMsg" class="ui green message" style="display:none;">
<i class="checkmark icon"></i> Log settings updated successfully
</div>
<div id="logSettingsErrorMsg" class="ui red message" style="display:none;">
<i class="exclamation triangle icon"></i> <span id="logSettingsErrorText"></span>
</div>
</form>
</div>
<div class="ui divider"></div>

<!-- Log Viewer -->
<h3>System Log Viewer</h3>
<p>View and download Zoraxy log</p>
<a class="ui basic button" href="snippet/logview.html" target="_blank"><i class="ui blue file icon"></i> Open Log Viewer</a>
<div class="ui divider"></div>
</div>
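The Maximum Log File Size field above accepts a plain byte count or a K/M/G suffix, with 0 disabling rotation. The following is a small re-implementation of that convention for illustration only; it is not Zoraxy's own parser and the exact grammar the backend accepts may differ.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSizeString converts values like "500", "10K", "200M" or "1G" to bytes.
// A zero value means rotation is disabled, matching the form's hint.
func parseSizeString(s string) (int64, error) {
	s = strings.TrimSpace(strings.ToUpper(s))
	if s == "" {
		return 0, fmt.Errorf("empty size string")
	}
	mult := int64(1)
	switch s[len(s)-1] {
	case 'K':
		mult, s = 1024, s[:len(s)-1]
	case 'M':
		mult, s = 1024*1024, s[:len(s)-1]
	case 'G':
		mult, s = 1024*1024*1024, s[:len(s)-1]
	}
	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return 0, err
	}
	return n * mult, nil
}

func main() {
	for _, v := range []string{"0", "500", "10K", "200M"} {
		b, _ := parseSizeString(v)
		fmt.Println(v, "=>", b, "bytes")
	}
}
```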
<div class="ui bottom attached tab segment utilitiesTabs" data-tab="utiltab3">
|
||||
<h3> IP Address to CIDR</h3>
|
||||
<p>No experience with CIDR notations? Here are some tools you can use to make setting up easier.</p>
|
||||
<div class="ui basic segment">
|
||||
@@ -116,17 +171,13 @@
</div>
<div class="ui divider"></div>
</div>
<div class="ui bottom attached tab segment utilitiesTabs" data-tab="utiltab3">
<div class="ui bottom attached tab segment utilitiesTabs" data-tab="utiltab4">
<!-- Config Tools -->
<h3>System Backup & Restore</h3>
<p>Options related to system backup, migrate and restore.</p>
<button class="ui basic button" onclick="showSideWrapper('snippet/configTools.html');"><i class="ui green undo icon icon"></i> Open Config Tools</button>
<div class="ui divider"></div>
<!-- Log Viewer -->
<h3>System Log Viewer</h3>
<p>View and download Zoraxy log</p>
<a class="ui basic button" href="snippet/logview.html" target="_blank"><i class="ui blue file icon"></i> Open Log Viewer</a>
<div class="ui divider"></div>

<!-- System Information -->
<div id="zoraxyinfo">
<h3 class="ui header">
@@ -307,6 +358,7 @@
});
}
initSMTPSettings();
loadLogSettings();

function sendTestEmail(btn){
$(btn).addClass("loading").addClass("disabled");
@@ -472,4 +524,104 @@
}
return [ip >>> 24 & 0xFF, ip >>> 16 & 0xFF, ip >>> 8 & 0xFF, ip & 0xFF].join('.')
}

/*
Log Settings
*/
function loadLogSettings() {
$.get("/api/logger/config", function(data) {
if (data.error) {
console.error("Failed to load log settings:", data.error);
return;
}
$("#logRotationEnabled").prop("checked", data.enabled);
$("#logMaxSize").val(data.maxSize);
$("#logMaxBackups").val(data.maxBackups || 16);
$("#logCompressionEnabled").prop("checked", data.compress);
// Re-initialize checkboxes after setting values
$('.ui.checkbox').checkbox();
}).fail(function(xhr, status, error) {
console.error("Failed to load log settings:", error);
});
}

function saveLogSettings() {
const settings = {
enabled: $("#logRotationEnabled").is(":checked"),
maxSize: $("#logMaxSize").val().trim(),
maxBackups: parseInt($("#logMaxBackups").val()) || 16,
compress: $("#logCompressionEnabled").is(":checked")
};

// Basic validation
if (settings.maxSize === "") {
showLogSettingsError("Max size cannot be empty");
return;
}

if (settings.maxBackups < 1) {
showLogSettingsError("Max backups must be at least 1");
return;
}

$.cjax({
type: "POST",
url: "/api/logger/config",
data: JSON.stringify(settings),
success: function(data) {
if (data.error) {
showLogSettingsError(data.error);
} else {
showLogSettingsSuccess();
}
},
error: function(xhr, status, error) {
showLogSettingsError("Failed to save settings: " + error);
}
});
}

function showLogSettingsSuccess() {
$("#logSettingsErrorMsg").hide();
$("#logSettingsSuccessMsg").stop().finish().slideDown("fast").delay(3000).slideUp("fast");
}

function showLogSettingsError(message) {
$("#logSettingsSuccessMsg").hide();
$("#logSettingsErrorText").text(message);
$("#logSettingsErrorMsg").stop().finish().slideDown("fast").delay(5000).slideUp("fast");
}

function triggerLogRotation(event) {
event.preventDefault();
event.stopImmediatePropagation();
// Show loading state on button
const rotateBtn = $('#rotateLogBtn');
const originalText = rotateBtn.html();
rotateBtn.html('<i class="spinner loading icon"></i> Rotating...').addClass('loading disabled');

$.get("/api/log/rotate/trigger", function(data) {
if (data.error) {
showLogSettingsError("Failed to rotate log: " + data.error);
} else {
showLogSettingsSuccess();
// Update success message for rotation
$("#logSettingsSuccessMsg").html('<i class="archive icon"></i> Log rotation completed successfully');
}
}).fail(function(xhr, status, error) {
showLogSettingsError("Failed to rotate log: " + error);
}).always(function() {
// Restore button state
rotateBtn.html(originalText).removeClass('loading disabled');
});
}

// Initialize Semantic UI checkboxes
$('.ui.checkbox').checkbox();

// Bind form submission
$("#log-settings-form").submit(function(e) {
e.preventDefault();
saveLogSettings();
});
</script>
@@ -408,8 +408,10 @@
$("#kidInput").show();
$("#hmacInput").show();
$("#skipTLS").show();
$("#dnsChallenge").hide();
$(".dnsChallengeOnly").hide();
$("#dnsChallenge").show();
if ($("#useDnsChallenge")[0].checked){
$(".dnsChallengeOnly").show();
}
} else if (this.value == "ZeroSSL") {
$("#kidInput").show();
$("#hmacInput").show();
@@ -468,7 +470,7 @@
defaultIntValue = 2;
defaultMinValue = 1;
}else if (key == "PropagationTimeout"){
defaultIntValue = 120;
defaultIntValue = 600;
defaultMinValue = 30;
}
optionalFieldsHTML += (`<div class="ui fluid labeled dnsConfigField small input" key="${key}" style="margin-top: 0.2em;">
@@ -10,7 +10,7 @@
<link rel="stylesheet" href="../script/semantic/semantic.min.css">
<script type="text/javascript" src="../script/jquery-3.6.0.min.js"></script>
<script type="text/javascript" src="../script/semantic/semantic.min.js"></script>
<link rel="stylesheet" href="main.css">
<link rel="stylesheet" href="../main.css">
<style>
.clickable{
cursor: pointer;
@@ -150,6 +150,20 @@
</div>
</div>

<!-- Lines Dropdown -->
<div class="ui selection dropdown" id="linesDropdown" style="margin-left: 0.4em; margin-top: 0.4em; height: 2.8em;">
<div class="text">Last 100 Lines</div>
<i class="dropdown icon"></i>
<div class="menu">
<div class="item selected" data-value="100">Last 100 Lines</div>
<div class="item" data-value="300">Last 300 Lines</div>
<div class="item" data-value="500">Last 500 Lines</div>
<div class="item" data-value="1000">Last 1000 Lines</div>
<div class="item" data-value="2000">Last 2000 Lines</div>
<div class="item" data-value="all">All Lines</div>
</div>
</div>

<!-- Download Button -->
<button class="ui icon basic button logfile_menu_btn" id="downloadLogBtn" title="Download Current Log File">
<i class="black download icon"></i>
@@ -263,11 +277,13 @@ Pick a log file from the menu to start debugging
<script>
//LogView Implementation
var currentFilter = "all";
var currentLines = "100";
var currentOpenedLogURL = "";
var currentLogFile = "";
var autoscroll = false;

$(".checkbox").checkbox();
$(".dropdown").dropdown();

/* Menu Subpanel Switch */
$(".subpanel").hide();
@@ -289,7 +305,7 @@ Pick a log file from the menu to start debugging
/* Refresh Button */
$("#refreshLogBtn").on("click", function() {
if (currentLogFile) {
openLog(null, null, currentLogFile, currentFilter);
openLog(null, null, currentLogFile, currentFilter, currentLines);
loadLogSummary(currentLogFile);
} else {
alert('Please select a log file first.');
@@ -299,6 +315,7 @@ Pick a log file from the menu to start debugging
/* Log file dropdown */
function populateLogfileDropdown() {
$.get("/api/log/list", function(data){
console.log(data);
let $menu = $("#logfileDropdownMenu");
$menu.html("");
for (let [key, value] of Object.entries(data)) {
@@ -310,17 +327,12 @@ Pick a log file from the menu to start debugging
});
}
$('#logfileDropdown').dropdown('refresh');

//let firstItem = $menu.find('.item').first();
//if (firstItem.length) {
// $('#logfileDropdown').dropdown('set selected', firstItem.data('value'));
//}
});
}
$('#logfileDropdown').dropdown({
onChange: function(value, text, $choice) {
if (value) {
openLog(null, $choice.data('category'), value, currentFilter || "all");
openLog(null, $choice.data('category'), value, currentFilter, currentLines);
loadLogSummary(value);
}
}
@@ -336,7 +348,7 @@ Pick a log file from the menu to start debugging
}
$(".filterbtn.active").removeClass("active");
$(`.filterbtn[filter="${value}"]`).addClass("active");
openLog(null, null, currentLogFile, currentFilter);
openLog(null, null, currentLogFile, currentFilter, currentLines);
}
});

@@ -344,17 +356,30 @@ Pick a log file from the menu to start debugging
$('#filterDropdown').dropdown('set selected', 'all');
currentFilter = "all";

/* Lines dropdown */
$('#linesDropdown').dropdown({
onChange: function(value) {
currentLines = value;
if (!currentLogFile) {
return;
}
openLog(null, null, currentLogFile, currentFilter, currentLines);
}
});

// Set default lines to 100
$('#linesDropdown').dropdown('set selected', '100');
currentLines = "100";

/* Log download button */
$("#downloadLogBtn").on("click", function() {
if (!currentLogFile) {
alert("Please select a log file first.");
return;
}
if (!currentOpenedLogURL) {
alert("No log file is currently opened.");
return;
}
$.get(currentOpenedLogURL, function(data) {
// Always download the full log file, regardless of current line limit
let downloadURL = "/api/log/read?file=" + currentLogFile + "&filter=" + currentFilter + "&lines=all";
$.get(downloadURL, function(data) {
if (data.error !== undefined) {
alert(data.error);
return;
@@ -566,11 +591,11 @@ Pick a log file from the menu to start debugging
}

function openLog(object, catergory, filename, filter="all"){
function openLog(object, catergory, filename, filter="all", lines="100"){
$(".logfile.active").removeClass('active');
$(object).addClass("active");
currentLogFile = filename;
currentOpenedLogURL = "/api/log/read?file=" + filename + "&filter=" + filter;
currentOpenedLogURL = "/api/log/read?file=" + filename + "&filter=" + filter + "&lines=" + lines;
$.get(currentOpenedLogURL, function(data){
if (data.error !== undefined){
alert(data.error);
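The viewer above builds its read URL from three query parameters, file, filter and lines, and the download button always requests lines=all. Below is a short sketch of assembling the same URL from Go, purely for illustration; how /api/log/read is authenticated is outside the scope of this snippet.

```go
package main

import (
	"fmt"
	"net/url"
)

// logReadURL mirrors how the viewer composes its request:
// /api/log/read?file=<name>&filter=<filter>&lines=<lines>
func logReadURL(file, filter, lines string) string {
	q := url.Values{}
	q.Set("file", file)
	q.Set("filter", filter)
	q.Set("lines", lines)
	return "/api/log/read?" + q.Encode()
}

func main() {
	fmt.Println(logReadURL("zoraxy.log", "all", "300")) // limited view, as the dropdown selects
	fmt.Println(logReadURL("zoraxy.log", "all", "all")) // full file, as used by the download button
}
```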