mirror of
https://github.com/XTLS/Xray-core.git
synced 2026-05-08 14:13:22 +00:00
Geodata: Support automatically updating .dat files and hot reloading (#5992)
https://github.com/XTLS/Xray-core/pull/5992#issuecomment-4320551920 Usage: https://github.com/XTLS/Xray-core/pull/5992#issuecomment-4291168039
This commit is contained in:
4
.github/docker/Dockerfile
vendored
4
.github/docker/Dockerfile
vendored
@@ -45,8 +45,8 @@ RUN mkdir -p /tmp/var/log/xray && touch \
|
||||
FROM gcr.io/distroless/static:nonroot
|
||||
|
||||
COPY --from=build --chown=0:0 --chmod=755 /src/xray /usr/local/bin/xray
|
||||
COPY --from=build --chown=0:0 --chmod=755 /tmp/empty /usr/local/share/xray
|
||||
COPY --from=build --chown=0:0 --chmod=644 /tmp/geodat/*.dat /usr/local/share/xray/
|
||||
COPY --from=build --chown=65532:65532 --chmod=755 /tmp/empty /usr/local/share/xray
|
||||
COPY --from=build --chown=65532:65532 --chmod=644 /tmp/geodat/*.dat /usr/local/share/xray/
|
||||
COPY --from=build --chown=0:0 --chmod=755 /tmp/empty /usr/local/etc/xray
|
||||
COPY --from=build --chown=0:0 --chmod=644 /tmp/usr/local/etc/xray/*.json /usr/local/etc/xray/
|
||||
COPY --from=build --chown=0:0 --chmod=755 /tmp/empty /var/log/xray
|
||||
|
||||
4
.github/docker/Dockerfile.usa
vendored
4
.github/docker/Dockerfile.usa
vendored
@@ -54,8 +54,8 @@ RUN mkdir -p /tmp/var/log/xray && touch \
|
||||
FROM --platform=linux/amd64 gcr.io/distroless/static:nonroot
|
||||
|
||||
COPY --from=build --chown=0:0 --chmod=755 /src/xray /usr/local/bin/xray
|
||||
COPY --from=build --chown=0:0 --chmod=755 /tmp/empty /usr/local/share/xray
|
||||
COPY --from=build --chown=0:0 --chmod=644 /tmp/geodat/*.dat /usr/local/share/xray/
|
||||
COPY --from=build --chown=65532:65532 --chmod=755 /tmp/empty /usr/local/share/xray
|
||||
COPY --from=build --chown=65532:65532 --chmod=644 /tmp/geodat/*.dat /usr/local/share/xray/
|
||||
COPY --from=build --chown=0:0 --chmod=755 /tmp/empty /usr/local/etc/xray
|
||||
COPY --from=build --chown=0:0 --chmod=644 /tmp/usr/local/etc/xray/*.json /usr/local/etc/xray/
|
||||
COPY --from=build --chown=0:0 --chmod=755 /tmp/empty /var/log/xray
|
||||
|
||||
198
app/geodata/config.pb.go
Normal file
198
app/geodata/config.pb.go
Normal file
@@ -0,0 +1,198 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.11
|
||||
// protoc v6.33.5
|
||||
// source: app/geodata/config.proto
|
||||
|
||||
package geodata
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
unsafe "unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type Asset struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
|
||||
File string `protobuf:"bytes,2,opt,name=file,proto3" json:"file,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *Asset) Reset() {
|
||||
*x = Asset{}
|
||||
mi := &file_app_geodata_config_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *Asset) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Asset) ProtoMessage() {}
|
||||
|
||||
func (x *Asset) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_app_geodata_config_proto_msgTypes[0]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Asset.ProtoReflect.Descriptor instead.
|
||||
func (*Asset) Descriptor() ([]byte, []int) {
|
||||
return file_app_geodata_config_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Asset) GetUrl() string {
|
||||
if x != nil {
|
||||
return x.Url
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Asset) GetFile() string {
|
||||
if x != nil {
|
||||
return x.File
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Cron string `protobuf:"bytes,1,opt,name=cron,proto3" json:"cron,omitempty"`
|
||||
Outbound string `protobuf:"bytes,2,opt,name=outbound,proto3" json:"outbound,omitempty"`
|
||||
Assets []*Asset `protobuf:"bytes,3,rep,name=assets,proto3" json:"assets,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *Config) Reset() {
|
||||
*x = Config{}
|
||||
mi := &file_app_geodata_config_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *Config) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Config) ProtoMessage() {}
|
||||
|
||||
func (x *Config) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_app_geodata_config_proto_msgTypes[1]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Config.ProtoReflect.Descriptor instead.
|
||||
func (*Config) Descriptor() ([]byte, []int) {
|
||||
return file_app_geodata_config_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *Config) GetCron() string {
|
||||
if x != nil {
|
||||
return x.Cron
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Config) GetOutbound() string {
|
||||
if x != nil {
|
||||
return x.Outbound
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Config) GetAssets() []*Asset {
|
||||
if x != nil {
|
||||
return x.Assets
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_app_geodata_config_proto protoreflect.FileDescriptor
|
||||
|
||||
const file_app_geodata_config_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"\x18app/geodata/config.proto\x12\x10xray.app.geodata\"-\n" +
|
||||
"\x05Asset\x12\x10\n" +
|
||||
"\x03url\x18\x01 \x01(\tR\x03url\x12\x12\n" +
|
||||
"\x04file\x18\x02 \x01(\tR\x04file\"i\n" +
|
||||
"\x06Config\x12\x12\n" +
|
||||
"\x04cron\x18\x01 \x01(\tR\x04cron\x12\x1a\n" +
|
||||
"\boutbound\x18\x02 \x01(\tR\boutbound\x12/\n" +
|
||||
"\x06assets\x18\x03 \x03(\v2\x17.xray.app.geodata.AssetR\x06assetsBR\n" +
|
||||
"\x14com.xray.app.geodataP\x01Z%github.com/xtls/xray-core/app/geodata\xaa\x02\x10Xray.App.Geodatab\x06proto3"
|
||||
|
||||
var (
|
||||
file_app_geodata_config_proto_rawDescOnce sync.Once
|
||||
file_app_geodata_config_proto_rawDescData []byte
|
||||
)
|
||||
|
||||
func file_app_geodata_config_proto_rawDescGZIP() []byte {
|
||||
file_app_geodata_config_proto_rawDescOnce.Do(func() {
|
||||
file_app_geodata_config_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_app_geodata_config_proto_rawDesc), len(file_app_geodata_config_proto_rawDesc)))
|
||||
})
|
||||
return file_app_geodata_config_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_app_geodata_config_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_app_geodata_config_proto_goTypes = []any{
|
||||
(*Asset)(nil), // 0: xray.app.geodata.Asset
|
||||
(*Config)(nil), // 1: xray.app.geodata.Config
|
||||
}
|
||||
var file_app_geodata_config_proto_depIdxs = []int32{
|
||||
0, // 0: xray.app.geodata.Config.assets:type_name -> xray.app.geodata.Asset
|
||||
1, // [1:1] is the sub-list for method output_type
|
||||
1, // [1:1] is the sub-list for method input_type
|
||||
1, // [1:1] is the sub-list for extension type_name
|
||||
1, // [1:1] is the sub-list for extension extendee
|
||||
0, // [0:1] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_app_geodata_config_proto_init() }
|
||||
func file_app_geodata_config_proto_init() {
|
||||
if File_app_geodata_config_proto != nil {
|
||||
return
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: unsafe.Slice(unsafe.StringData(file_app_geodata_config_proto_rawDesc), len(file_app_geodata_config_proto_rawDesc)),
|
||||
NumEnums: 0,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_app_geodata_config_proto_goTypes,
|
||||
DependencyIndexes: file_app_geodata_config_proto_depIdxs,
|
||||
MessageInfos: file_app_geodata_config_proto_msgTypes,
|
||||
}.Build()
|
||||
File_app_geodata_config_proto = out.File
|
||||
file_app_geodata_config_proto_goTypes = nil
|
||||
file_app_geodata_config_proto_depIdxs = nil
|
||||
}
|
||||
21
app/geodata/config.proto
Normal file
21
app/geodata/config.proto
Normal file
@@ -0,0 +1,21 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package xray.app.geodata;
|
||||
option csharp_namespace = "Xray.App.Geodata";
|
||||
option go_package = "github.com/xtls/xray-core/app/geodata";
|
||||
option java_package = "com.xray.app.geodata";
|
||||
option java_multiple_files = true;
|
||||
|
||||
message Asset {
|
||||
string url = 1;
|
||||
|
||||
string file = 2;
|
||||
}
|
||||
|
||||
message Config {
|
||||
string cron = 1;
|
||||
|
||||
string outbound = 2;
|
||||
|
||||
repeated Asset assets = 3;
|
||||
}
|
||||
304
app/geodata/download.go
Normal file
304
app/geodata/download.go
Normal file
@@ -0,0 +1,304 @@
|
||||
package geodata
|
||||
|
||||
import (
|
||||
"context"
|
||||
go_errors "errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/xtls/xray-core/common/errors"
|
||||
"github.com/xtls/xray-core/common/net"
|
||||
"github.com/xtls/xray-core/common/platform/filesystem"
|
||||
"github.com/xtls/xray-core/common/task"
|
||||
"github.com/xtls/xray-core/common/utils"
|
||||
"github.com/xtls/xray-core/features/routing"
|
||||
"github.com/xtls/xray-core/transport/internet/tagged"
|
||||
)
|
||||
|
||||
const idleTimeout = 30 * time.Second
|
||||
|
||||
type stage struct {
|
||||
target string
|
||||
temp string
|
||||
}
|
||||
|
||||
type downloader struct {
|
||||
ctx context.Context
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
type idleConn struct {
|
||||
net.Conn
|
||||
}
|
||||
|
||||
func (c *idleConn) Read(b []byte) (int, error) {
|
||||
t := time.AfterFunc(idleTimeout, func() {
|
||||
_ = c.Close()
|
||||
})
|
||||
|
||||
n, err := c.Conn.Read(b)
|
||||
if !t.Stop() {
|
||||
_ = c.Close()
|
||||
return n, errors.New("connection idle timeout")
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (c *idleConn) Write(b []byte) (int, error) {
|
||||
return c.Conn.Write(b)
|
||||
}
|
||||
|
||||
func newDownloader(ctx context.Context, dispatcher routing.Dispatcher, outbound string) *downloader {
|
||||
return &downloader{
|
||||
ctx: ctx,
|
||||
client: newClient(ctx, dispatcher, outbound),
|
||||
}
|
||||
}
|
||||
|
||||
func newClient(baseCtx context.Context, dispatcher routing.Dispatcher, outbound string) *http.Client {
|
||||
return &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: nil,
|
||||
DisableKeepAlives: true,
|
||||
DialContext: func(ctx context.Context, network, address string) (net.Conn, error) {
|
||||
var conn net.Conn
|
||||
err := task.Run(ctx, func() error {
|
||||
if tagged.Dialer == nil {
|
||||
return errors.New("tagged dialer is not initialized")
|
||||
}
|
||||
dest, err := net.ParseDestination(network + ":" + address)
|
||||
if err != nil {
|
||||
return errors.New("cannot understand address").Base(err)
|
||||
}
|
||||
c, err := tagged.Dialer(baseCtx, dispatcher, dest, outbound)
|
||||
if err != nil {
|
||||
return errors.New("cannot dial remote address ", dest).Base(err)
|
||||
}
|
||||
conn = c
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.New("cannot finish connection").Base(err)
|
||||
}
|
||||
return &idleConn{
|
||||
Conn: conn,
|
||||
}, nil
|
||||
},
|
||||
TLSHandshakeTimeout: idleTimeout,
|
||||
ResponseHeaderTimeout: idleTimeout,
|
||||
},
|
||||
CheckRedirect: func(req *http.Request, via []*http.Request) error {
|
||||
if req.URL.Scheme != "https" {
|
||||
return errors.New("redirected to non-https URL: ", req.URL.String())
|
||||
}
|
||||
if len(via) >= 10 {
|
||||
return errors.New("stopped after 10 redirects")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *downloader) download(assets []*Asset) ([]stage, error) {
|
||||
staged := make([]stage, 0, len(assets))
|
||||
for _, asset := range assets {
|
||||
stage, err := d.downloadOne(asset)
|
||||
if err != nil {
|
||||
clean(staged)
|
||||
return nil, err
|
||||
}
|
||||
staged = append(staged, stage)
|
||||
}
|
||||
return staged, nil
|
||||
}
|
||||
|
||||
func (d *downloader) downloadOne(asset *Asset) (stage, error) {
|
||||
target, err := filesystem.ResolveAsset(asset.File)
|
||||
if err != nil {
|
||||
return stage{}, err
|
||||
}
|
||||
errors.LogInfo(d.ctx, "downloading geodata asset from ", asset.Url, " to ", target)
|
||||
|
||||
temp, err := tempFile(target, ".tmp")
|
||||
if err != nil {
|
||||
return stage{}, err
|
||||
}
|
||||
tempName := temp.Name()
|
||||
keepTemp := false
|
||||
defer func() {
|
||||
if !keepTemp {
|
||||
os.Remove(tempName)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := d.fetch(asset.Url, temp); err != nil {
|
||||
temp.Close()
|
||||
return stage{}, err
|
||||
}
|
||||
if err := temp.Chmod(0o644); err != nil {
|
||||
temp.Close()
|
||||
return stage{}, err
|
||||
}
|
||||
if err := temp.Close(); err != nil {
|
||||
return stage{}, err
|
||||
}
|
||||
|
||||
keepTemp = true
|
||||
return stage{
|
||||
target: target,
|
||||
temp: tempName,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *downloader) fetch(rawURL string, writer io.Writer) error {
|
||||
req, err := http.NewRequestWithContext(d.ctx, http.MethodGet, rawURL, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
utils.TryDefaultHeadersWith(req.Header, "nav")
|
||||
|
||||
resp, err := d.client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
|
||||
io.Copy(io.Discard, resp.Body)
|
||||
return errors.New("unexpected status code: ", resp.StatusCode)
|
||||
}
|
||||
|
||||
n, err := io.Copy(writer, resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n == 0 {
|
||||
return errors.New("empty response body")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func clean(assets []stage) {
|
||||
for _, asset := range assets {
|
||||
if asset.temp != "" {
|
||||
os.Remove(asset.temp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type tx struct {
|
||||
swaps []swap
|
||||
}
|
||||
|
||||
type swap struct {
|
||||
target string
|
||||
backup string
|
||||
hadOriginal bool
|
||||
}
|
||||
|
||||
func swapAll(assets []stage) (*tx, error) {
|
||||
t := &tx{}
|
||||
for _, asset := range assets {
|
||||
s, err := swapOne(asset)
|
||||
if err != nil {
|
||||
return nil, errors.Combine(err, t.rollback())
|
||||
}
|
||||
t.swaps = append(t.swaps, s)
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func swapOne(asset stage) (swap, error) {
|
||||
backup, err := backupFile(asset.target)
|
||||
if err != nil {
|
||||
return swap{}, err
|
||||
}
|
||||
|
||||
s := swap{
|
||||
target: asset.target,
|
||||
backup: backup,
|
||||
}
|
||||
if err := os.Rename(asset.target, backup); err != nil {
|
||||
if !go_errors.Is(err, os.ErrNotExist) {
|
||||
return swap{}, err
|
||||
}
|
||||
if err := os.Remove(backup); err != nil && !go_errors.Is(err, os.ErrNotExist) {
|
||||
return swap{}, err
|
||||
}
|
||||
} else {
|
||||
s.hadOriginal = true
|
||||
}
|
||||
|
||||
if err := os.Rename(asset.temp, asset.target); err != nil {
|
||||
if s.hadOriginal {
|
||||
if restoreErr := os.Rename(backup, asset.target); restoreErr != nil {
|
||||
return swap{}, errors.Combine(err, restoreErr)
|
||||
}
|
||||
}
|
||||
return swap{}, err
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (t *tx) rollback() error {
|
||||
var errs []error
|
||||
for i := len(t.swaps) - 1; i >= 0; i-- {
|
||||
if err := t.swaps[i].rollback(); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
return errors.Combine(errs...)
|
||||
}
|
||||
|
||||
func (s swap) rollback() error {
|
||||
var errs []error
|
||||
if err := os.Remove(s.target); err != nil && !go_errors.Is(err, os.ErrNotExist) {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
if s.hadOriginal {
|
||||
if err := os.Rename(s.backup, s.target); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
} else if err := os.Remove(s.backup); err != nil && !go_errors.Is(err, os.ErrNotExist) {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
return errors.Combine(errs...)
|
||||
}
|
||||
|
||||
func (t *tx) commit() error {
|
||||
var errs []error
|
||||
for _, swap := range t.swaps {
|
||||
if err := os.Remove(swap.backup); err != nil && !go_errors.Is(err, os.ErrNotExist) {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
return errors.Combine(errs...)
|
||||
}
|
||||
|
||||
func tempFile(target string, suffix string) (*os.File, error) {
|
||||
dir := filepath.Dir(target)
|
||||
if err := os.MkdirAll(dir, 0o755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return os.CreateTemp(dir, "."+filepath.Base(target)+".*"+suffix)
|
||||
}
|
||||
|
||||
func backupFile(target string) (string, error) {
|
||||
file, err := tempFile(target, ".bak")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
name := file.Name()
|
||||
if err := file.Close(); err != nil {
|
||||
os.Remove(name)
|
||||
return "", err
|
||||
}
|
||||
if err := os.Remove(name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return name, nil
|
||||
}
|
||||
134
app/geodata/geodata.go
Normal file
134
app/geodata/geodata.go
Normal file
@@ -0,0 +1,134 @@
|
||||
package geodata
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/robfig/cron/v3"
|
||||
"github.com/xtls/xray-core/common"
|
||||
"github.com/xtls/xray-core/common/errors"
|
||||
commongeodata "github.com/xtls/xray-core/common/geodata"
|
||||
"github.com/xtls/xray-core/core"
|
||||
"github.com/xtls/xray-core/features/routing"
|
||||
)
|
||||
|
||||
type Instance struct {
|
||||
assets []*Asset
|
||||
downloader *downloader
|
||||
tasker *cron.Cron
|
||||
|
||||
mu sync.Mutex
|
||||
running bool
|
||||
}
|
||||
|
||||
func New(ctx context.Context, config *Config) (*Instance, error) {
|
||||
if config.Cron == "" {
|
||||
return &Instance{}, nil
|
||||
}
|
||||
|
||||
g := &Instance{
|
||||
assets: config.Assets,
|
||||
}
|
||||
|
||||
if len(g.assets) > 0 {
|
||||
var dispatcher routing.Dispatcher
|
||||
if err := core.RequireFeatures(ctx, func(d routing.Dispatcher) {
|
||||
dispatcher = d
|
||||
}); err != nil {
|
||||
return nil, errors.New("failed to get dispatcher for geodata downloader").Base(err)
|
||||
}
|
||||
g.downloader = newDownloader(ctx, dispatcher, config.Outbound)
|
||||
}
|
||||
|
||||
g.tasker = cron.New(
|
||||
cron.WithChain(cron.SkipIfStillRunning(cron.DiscardLogger)),
|
||||
cron.WithLogger(cron.DiscardLogger),
|
||||
)
|
||||
if _, err := g.tasker.AddFunc(config.Cron, g.execute); err != nil {
|
||||
return nil, errors.New("invalid geodata cron").Base(err)
|
||||
}
|
||||
errors.LogInfo(ctx, "scheduled geodata reload with cron: ", config.Cron)
|
||||
|
||||
return g, nil
|
||||
}
|
||||
|
||||
func (g *Instance) execute() {
|
||||
var err error
|
||||
if g.downloader != nil {
|
||||
err = g.reloadWithUpdate()
|
||||
} else {
|
||||
err = reload()
|
||||
}
|
||||
if err != nil {
|
||||
errors.LogErrorInner(context.Background(), err, "scheduled geodata reload failed")
|
||||
}
|
||||
}
|
||||
|
||||
func (g *Instance) reloadWithUpdate() error {
|
||||
staged, err := g.downloader.download(g.assets)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer clean(staged)
|
||||
|
||||
tx, err := swapAll(staged)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := reload(); err != nil {
|
||||
errors.LogErrorInner(context.Background(), err, "failed to reload geodata after downloading assets, rolling back")
|
||||
rollbackErr := tx.rollback()
|
||||
return errors.Combine(err, rollbackErr)
|
||||
}
|
||||
|
||||
return tx.commit()
|
||||
}
|
||||
|
||||
func reload() error {
|
||||
return errors.Combine(commongeodata.IPReg.Reload(), commongeodata.DomainReg.Reload())
|
||||
}
|
||||
|
||||
func (g *Instance) Type() interface{} {
|
||||
return (*Instance)(nil)
|
||||
}
|
||||
|
||||
func (g *Instance) Start() error {
|
||||
g.mu.Lock()
|
||||
defer g.mu.Unlock()
|
||||
|
||||
if g.running {
|
||||
return nil
|
||||
}
|
||||
|
||||
if g.tasker != nil {
|
||||
g.tasker.Start()
|
||||
}
|
||||
|
||||
g.running = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (g *Instance) Close() error {
|
||||
g.mu.Lock()
|
||||
defer g.mu.Unlock()
|
||||
|
||||
if !g.running {
|
||||
return nil
|
||||
}
|
||||
|
||||
if g.tasker != nil {
|
||||
<-g.tasker.Stop().Done()
|
||||
}
|
||||
|
||||
g.running = false
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
common.Must(common.RegisterConfig((*Config)(nil), func(ctx context.Context, cfg interface{}) (interface{}, error) {
|
||||
return New(ctx, cfg.(*Config))
|
||||
}))
|
||||
}
|
||||
@@ -1,11 +1,59 @@
|
||||
package geodata
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/xtls/xray-core/common/errors"
|
||||
)
|
||||
|
||||
type DomainRegistry struct {
|
||||
mu sync.Mutex
|
||||
factory DomainMatcherFactory
|
||||
matchers []*DynamicDomainMatcher
|
||||
}
|
||||
|
||||
func (r *DomainRegistry) BuildDomainMatcher(rules []*DomainRule) (DomainMatcher, error) {
|
||||
return r.factory.BuildMatcher(rules)
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
m, err := r.factory.BuildMatcher(rules)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d := NewDynamicDomainMatcher(rules, m)
|
||||
r.matchers = append(r.matchers, d)
|
||||
return d, nil
|
||||
}
|
||||
|
||||
func (r *DomainRegistry) Reload() error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
errors.LogInfo(context.Background(), "reloading GeoSite data for ", len(r.matchers), " domain matcher(s)")
|
||||
|
||||
factory := newDomainMatcherFactory()
|
||||
type reloadEntry struct {
|
||||
dynamic *DynamicDomainMatcher
|
||||
matcher DomainMatcher
|
||||
}
|
||||
reloaded := make([]reloadEntry, len(r.matchers))
|
||||
for i, d := range r.matchers {
|
||||
m, err := factory.BuildMatcher(d.rules)
|
||||
if err != nil {
|
||||
errors.LogErrorInner(context.Background(), err, "failed to reload GeoSite data for domain matcher ", i)
|
||||
return err
|
||||
}
|
||||
reloaded[i] = reloadEntry{dynamic: d, matcher: m}
|
||||
}
|
||||
for _, entry := range reloaded {
|
||||
entry.dynamic.Reload(entry.matcher)
|
||||
}
|
||||
r.factory = factory
|
||||
errors.LogInfo(context.Background(), "reloaded GeoSite data for ", len(r.matchers), " domain matcher(s)")
|
||||
return nil
|
||||
}
|
||||
|
||||
func newDomainRegistry() *DomainRegistry {
|
||||
@@ -15,3 +63,32 @@ func newDomainRegistry() *DomainRegistry {
|
||||
}
|
||||
|
||||
var DomainReg = newDomainRegistry()
|
||||
|
||||
type domainMatcherState struct {
|
||||
matcher DomainMatcher
|
||||
}
|
||||
|
||||
type DynamicDomainMatcher struct {
|
||||
rules []*DomainRule
|
||||
state atomic.Pointer[domainMatcherState]
|
||||
}
|
||||
|
||||
// Match implements DomainMatcher.
|
||||
func (d *DynamicDomainMatcher) Match(input string) []uint32 {
|
||||
return d.state.Load().matcher.Match(input)
|
||||
}
|
||||
|
||||
// MatchAny implements DomainMatcher.
|
||||
func (d *DynamicDomainMatcher) MatchAny(input string) bool {
|
||||
return d.state.Load().matcher.MatchAny(input)
|
||||
}
|
||||
|
||||
func (d *DynamicDomainMatcher) Reload(newMatcher DomainMatcher) {
|
||||
d.state.Store(&domainMatcherState{matcher: newMatcher})
|
||||
}
|
||||
|
||||
func NewDynamicDomainMatcher(rules []*DomainRule, matcher DomainMatcher) *DynamicDomainMatcher {
|
||||
d := &DynamicDomainMatcher{rules: rules}
|
||||
d.Reload(matcher)
|
||||
return d
|
||||
}
|
||||
|
||||
@@ -1016,3 +1016,7 @@ func buildOptimizedIPMatcher(f *IPSetFactory, rules []*IPRule) (IPMatcher, error
|
||||
return &HeuristicMultiIPMatcher{matchers: subs}, nil
|
||||
}
|
||||
}
|
||||
|
||||
func newIPSetFactory() *IPSetFactory {
|
||||
return &IPSetFactory{shared: make(map[string]*IPSet)}
|
||||
}
|
||||
|
||||
@@ -1,17 +1,135 @@
|
||||
package geodata
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/xtls/xray-core/common/errors"
|
||||
"github.com/xtls/xray-core/common/net"
|
||||
)
|
||||
|
||||
type IPRegistry struct {
|
||||
mu sync.Mutex
|
||||
ipsetFactory *IPSetFactory
|
||||
matchers []*DynamicIPMatcher
|
||||
}
|
||||
|
||||
func (r *IPRegistry) BuildIPMatcher(rules []*IPRule) (IPMatcher, error) {
|
||||
return buildOptimizedIPMatcher(r.ipsetFactory, rules)
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
m, err := buildOptimizedIPMatcher(r.ipsetFactory, rules)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d := NewDynamicIPMatcher(rules, m)
|
||||
r.matchers = append(r.matchers, d)
|
||||
return d, nil
|
||||
}
|
||||
|
||||
func (r *IPRegistry) Reload() error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
errors.LogInfo(context.Background(), "reloading GeoIP data for ", len(r.matchers), " IP matcher(s)")
|
||||
|
||||
factory := newIPSetFactory()
|
||||
type reloadEntry struct {
|
||||
dynamic *DynamicIPMatcher
|
||||
matcher IPMatcher
|
||||
}
|
||||
reloaded := make([]reloadEntry, len(r.matchers))
|
||||
for i, d := range r.matchers {
|
||||
m, err := buildOptimizedIPMatcher(factory, d.rules)
|
||||
if err != nil {
|
||||
errors.LogErrorInner(context.Background(), err, "failed to reload GeoIP data for IP matcher ", i)
|
||||
return err
|
||||
}
|
||||
reloaded[i] = reloadEntry{dynamic: d, matcher: m}
|
||||
}
|
||||
for _, entry := range reloaded {
|
||||
entry.dynamic.Reload(entry.matcher)
|
||||
}
|
||||
r.ipsetFactory = factory
|
||||
errors.LogInfo(context.Background(), "reloaded GeoIP data for ", len(r.matchers), " IP matcher(s)")
|
||||
return nil
|
||||
}
|
||||
|
||||
func newIPRegistry() *IPRegistry {
|
||||
return &IPRegistry{
|
||||
ipsetFactory: &IPSetFactory{shared: make(map[string]*IPSet)},
|
||||
ipsetFactory: newIPSetFactory(),
|
||||
}
|
||||
}
|
||||
|
||||
var IPReg = newIPRegistry()
|
||||
|
||||
type ipMatcherState struct {
|
||||
matcher IPMatcher
|
||||
}
|
||||
|
||||
type DynamicIPMatcher struct {
|
||||
rules []*IPRule
|
||||
state atomic.Pointer[ipMatcherState]
|
||||
mu sync.Mutex
|
||||
reverse bool
|
||||
reverseSet bool
|
||||
}
|
||||
|
||||
// Match implements IPMatcher.
|
||||
func (d *DynamicIPMatcher) Match(ip net.IP) bool {
|
||||
return d.state.Load().matcher.Match(ip)
|
||||
}
|
||||
|
||||
// AnyMatch implements IPMatcher.
|
||||
func (d *DynamicIPMatcher) AnyMatch(ips []net.IP) bool {
|
||||
return d.state.Load().matcher.AnyMatch(ips)
|
||||
}
|
||||
|
||||
// Matches implements IPMatcher.
|
||||
func (d *DynamicIPMatcher) Matches(ips []net.IP) bool {
|
||||
return d.state.Load().matcher.Matches(ips)
|
||||
}
|
||||
|
||||
// FilterIPs implements IPMatcher.
|
||||
func (d *DynamicIPMatcher) FilterIPs(ips []net.IP) (matched []net.IP, unmatched []net.IP) {
|
||||
return d.state.Load().matcher.FilterIPs(ips)
|
||||
}
|
||||
|
||||
// ToggleReverse implements IPMatcher.
|
||||
func (d *DynamicIPMatcher) ToggleReverse() {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
d.reverse = !d.reverse
|
||||
d.state.Load().matcher.ToggleReverse()
|
||||
}
|
||||
|
||||
// SetReverse implements IPMatcher.
|
||||
func (d *DynamicIPMatcher) SetReverse(reverse bool) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
d.reverse = reverse
|
||||
d.reverseSet = true
|
||||
d.state.Load().matcher.SetReverse(reverse)
|
||||
}
|
||||
|
||||
func (d *DynamicIPMatcher) Reload(newMatcher IPMatcher) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
if d.reverseSet {
|
||||
newMatcher.SetReverse(d.reverse)
|
||||
} else if d.reverse {
|
||||
newMatcher.ToggleReverse()
|
||||
}
|
||||
d.state.Store(&ipMatcherState{matcher: newMatcher})
|
||||
}
|
||||
|
||||
func NewDynamicIPMatcher(rules []*IPRule, matcher IPMatcher) *DynamicIPMatcher {
|
||||
d := &DynamicIPMatcher{rules: rules}
|
||||
d.Reload(matcher)
|
||||
return d
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -26,11 +27,48 @@ func ReadFile(path string) ([]byte, error) {
|
||||
}
|
||||
|
||||
func ReadAsset(file string) ([]byte, error) {
|
||||
return ReadFile(platform.GetAssetLocation(file))
|
||||
path, _, err := getAssetFileLocation(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ReadFile(path)
|
||||
}
|
||||
|
||||
func OpenAsset(file string) (io.ReadCloser, error) {
|
||||
return NewFileReader(platform.GetAssetLocation(file))
|
||||
path, _, err := getAssetFileLocation(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewFileReader(path)
|
||||
}
|
||||
|
||||
func StatAsset(file string) (os.FileInfo, error) {
|
||||
_, info, err := getAssetFileLocation(file)
|
||||
return info, err
|
||||
}
|
||||
|
||||
func ResolveAsset(file string) (string, error) {
|
||||
path, _, err := getAssetFileLocation(file)
|
||||
return path, err
|
||||
}
|
||||
|
||||
func getAssetFileLocation(file string) (string, os.FileInfo, error) {
|
||||
if !filepath.IsLocal(file) || file == "." {
|
||||
return "", nil, errors.New("asset path must stay in asset directory")
|
||||
}
|
||||
local, err := filepath.Localize(file)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
path := platform.GetAssetLocation(local)
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
if !info.Mode().IsRegular() {
|
||||
return "", nil, errors.New("asset is not a regular file")
|
||||
}
|
||||
return path, info, nil
|
||||
}
|
||||
|
||||
func ReadCert(file string) ([]byte, error) {
|
||||
|
||||
32
common/platform/filesystem/file_test.go
Normal file
32
common/platform/filesystem/file_test.go
Normal file
@@ -0,0 +1,32 @@
|
||||
package filesystem_test
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
. "github.com/xtls/xray-core/common/platform/filesystem"
|
||||
)
|
||||
|
||||
func TestStatAssetRejectsInvalidPath(t *testing.T) {
|
||||
for _, file := range []string{
|
||||
"",
|
||||
".",
|
||||
"..",
|
||||
"../geoip.dat",
|
||||
"nested/..",
|
||||
"nested/../geoip.dat",
|
||||
"nested//geoip.dat",
|
||||
"/geoip.dat",
|
||||
"/tmp/geoip.dat",
|
||||
`C:\geoip.dat`,
|
||||
`C:geoip.dat`,
|
||||
`\\server\share\geoip.dat`,
|
||||
`nested\geoip.dat`,
|
||||
`nested\..\geoip.dat`,
|
||||
filepath.Join(t.TempDir(), "geoip.dat"),
|
||||
} {
|
||||
if _, err := StatAsset(file); err == nil {
|
||||
t.Fatalf("expected error for %q", file)
|
||||
}
|
||||
}
|
||||
}
|
||||
1
go.mod
1
go.mod
@@ -14,6 +14,7 @@ require (
|
||||
github.com/pelletier/go-toml v1.9.5
|
||||
github.com/pires/go-proxyproto v0.12.0
|
||||
github.com/refraction-networking/utls v1.8.3-0.20260301010127-aa6edf4b11af
|
||||
github.com/robfig/cron/v3 v3.0.0
|
||||
github.com/sagernet/sing v0.5.1
|
||||
github.com/sagernet/sing-shadowsocks v0.2.7
|
||||
github.com/stretchr/testify v1.11.1
|
||||
|
||||
2
go.sum
2
go.sum
@@ -53,6 +53,8 @@ github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
|
||||
github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII=
|
||||
github.com/refraction-networking/utls v1.8.3-0.20260301010127-aa6edf4b11af h1:er2acxbi3N1nvEq6HXHUAR1nTWEJmQfqiGR8EVT9rfs=
|
||||
github.com/refraction-networking/utls v1.8.3-0.20260301010127-aa6edf4b11af/go.mod h1:jkSOEkLqn+S/jtpEHPOsVv/4V4EVnelwbMQl4vCWXAM=
|
||||
github.com/robfig/cron/v3 v3.0.0 h1:kQ6Cb7aHOHTSzNVNEhmp8EcWKLb4CbiMW9h9VyIhO4E=
|
||||
github.com/robfig/cron/v3 v3.0.0/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/sagernet/sing v0.5.1 h1:mhL/MZVq0TjuvHcpYcFtmSD1BFOxZ/+8ofbNZcg1k1Y=
|
||||
|
||||
71
infra/conf/geodata.go
Normal file
71
infra/conf/geodata.go
Normal file
@@ -0,0 +1,71 @@
|
||||
package conf
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
|
||||
"github.com/robfig/cron/v3"
|
||||
"github.com/xtls/xray-core/app/geodata"
|
||||
"github.com/xtls/xray-core/common/errors"
|
||||
"github.com/xtls/xray-core/common/platform/filesystem"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// GeodataAssetConfig is the JSON configuration for a single geodata asset
// (.dat file) that should be downloaded and kept up to date.
type GeodataAssetConfig struct {
	// URL is the download location of the asset; Build requires it to be
	// an HTTPS URL with a non-empty host.
	URL string `json:"url"`
	// File is the asset file name; Build requires it to already exist as
	// a regular file inside the asset directory.
	File string `json:"file"`
}
|
||||
|
||||
func (c *GeodataAssetConfig) Build() (*geodata.Asset, error) {
|
||||
if err := validateHTTPS(c.URL); err != nil {
|
||||
return nil, errors.New("invalid geodata asset url: ", c.URL).Base(err)
|
||||
}
|
||||
if _, err := filesystem.StatAsset(c.File); err != nil {
|
||||
return nil, errors.New("invalid geodata asset file: ", c.File).Base(err)
|
||||
}
|
||||
return &geodata.Asset{
|
||||
Url: c.URL,
|
||||
File: c.File,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// validateHTTPS reports an error unless s parses as an absolute URL with
// the https scheme and a non-empty host.
func validateHTTPS(s string) error {
	parsed, err := url.ParseRequestURI(s)
	if err != nil {
		return err
	}
	if parsed.Scheme == "https" && parsed.Host != "" {
		return nil
	}
	return errors.New("scheme must be https")
}
|
||||
|
||||
// GeodataConfig is the JSON configuration for the geodata app, which
// automatically updates .dat asset files and hot-reloads them.
type GeodataConfig struct {
	// Cron is an optional standard cron expression controlling when
	// updates run; Build rejects expressions cron.ParseStandard cannot parse.
	Cron *string `json:"cron"`
	// Outbound selects the outbound used for downloads — presumably an
	// outbound tag; it is passed through to geodata.Config unchanged.
	Outbound string `json:"outbound"`
	// Assets lists the files to keep updated; each entry is validated by
	// GeodataAssetConfig.Build.
	Assets []*GeodataAssetConfig `json:"assets"`
}
|
||||
|
||||
func (c *GeodataConfig) Build() (proto.Message, error) {
|
||||
config := &geodata.Config{}
|
||||
|
||||
if c.Cron != nil {
|
||||
if _, err := cron.ParseStandard(*c.Cron); err != nil {
|
||||
return nil, errors.New("invalid geodata cron").Base(err)
|
||||
}
|
||||
config.Cron = *c.Cron
|
||||
}
|
||||
|
||||
config.Outbound = c.Outbound
|
||||
|
||||
assets := make([]*geodata.Asset, 0, len(c.Assets))
|
||||
for _, asset := range c.Assets {
|
||||
built, err := asset.Build()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
assets = append(assets, built)
|
||||
}
|
||||
config.Assets = assets
|
||||
|
||||
return config, nil
|
||||
}
|
||||
75
infra/conf/geodata_test.go
Normal file
75
infra/conf/geodata_test.go
Normal file
@@ -0,0 +1,75 @@
|
||||
package conf_test
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/xtls/xray-core/app/geodata"
|
||||
. "github.com/xtls/xray-core/infra/conf"
|
||||
)
|
||||
|
||||
func TestGeodataConfig(t *testing.T) {
|
||||
t.Setenv("xray.location.asset", filepath.Join("..", "..", "resources"))
|
||||
|
||||
creator := func() Buildable {
|
||||
return new(GeodataConfig)
|
||||
}
|
||||
|
||||
runMultiTestCase(t, []TestCase{
|
||||
{
|
||||
Input: `{
|
||||
"cron": "0 4 * * *",
|
||||
"outbound": "proxy",
|
||||
"assets": [
|
||||
{"url": "https://example.com/geoip.dat", "file": "geoip.dat"},
|
||||
{"url": "https://example.com/geosite.dat", "file": "geosite.dat"}
|
||||
]
|
||||
}`,
|
||||
Parser: loadJSON(creator),
|
||||
Output: &geodata.Config{
|
||||
Cron: "0 4 * * *",
|
||||
Outbound: "proxy",
|
||||
Assets: []*geodata.Asset{
|
||||
{Url: "https://example.com/geoip.dat", File: "geoip.dat"},
|
||||
{Url: "https://example.com/geosite.dat", File: "geosite.dat"},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestGeodataAssetConfig(t *testing.T) {
|
||||
t.Setenv("xray.location.asset", filepath.Join("..", "..", "resources"))
|
||||
|
||||
if _, err := (&GeodataAssetConfig{
|
||||
URL: "https://example.com/geoip.dat",
|
||||
File: "geoip.dat",
|
||||
}).Build(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := (&GeodataAssetConfig{
|
||||
URL: "https://example.com/geoip.dat",
|
||||
File: "missing.dat",
|
||||
}).Build(); err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGeodataAssetConfigInvalidURL(t *testing.T) {
|
||||
t.Setenv("xray.location.asset", filepath.Join("..", "..", "resources"))
|
||||
|
||||
for _, rawURL := range []string{
|
||||
"",
|
||||
"http://example.com/geoip.dat",
|
||||
"ftp://example.com/geoip.dat",
|
||||
"https:///geoip.dat",
|
||||
} {
|
||||
if _, err := (&GeodataAssetConfig{
|
||||
URL: rawURL,
|
||||
File: "geoip.dat",
|
||||
}).Build(); err == nil {
|
||||
t.Fatalf("expected error for %q", rawURL)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -361,6 +361,7 @@ type Config struct {
|
||||
Observatory *ObservatoryConfig `json:"observatory"`
|
||||
BurstObservatory *BurstObservatoryConfig `json:"burstObservatory"`
|
||||
Version *VersionConfig `json:"version"`
|
||||
Geodata *GeodataConfig `json:"geodata"`
|
||||
}
|
||||
|
||||
func (c *Config) findInboundTag(tag string) int {
|
||||
@@ -433,6 +434,10 @@ func (c *Config) Override(o *Config, fn string) {
|
||||
c.Version = o.Version
|
||||
}
|
||||
|
||||
if o.Geodata != nil {
|
||||
c.Geodata = o.Geodata
|
||||
}
|
||||
|
||||
// update the Inbound in slice if the only one in override config has same tag
|
||||
if len(o.InboundConfigs) > 0 {
|
||||
for i := range o.InboundConfigs {
|
||||
@@ -581,6 +586,14 @@ func (c *Config) Build() (*core.Config, error) {
|
||||
config.App = append(config.App, serial.ToTypedMessage(r))
|
||||
}
|
||||
|
||||
if c.Geodata != nil {
|
||||
r, err := c.Geodata.Build()
|
||||
if err != nil {
|
||||
return nil, errors.New("failed to build geodata configuration").Base(err)
|
||||
}
|
||||
config.App = append(config.App, serial.ToTypedMessage(r))
|
||||
}
|
||||
|
||||
var inbounds []InboundDetourConfig
|
||||
|
||||
if len(c.InboundConfigs) > 0 {
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
// Other optional features.
|
||||
_ "github.com/xtls/xray-core/app/dns"
|
||||
_ "github.com/xtls/xray-core/app/dns/fakedns"
|
||||
_ "github.com/xtls/xray-core/app/geodata"
|
||||
_ "github.com/xtls/xray-core/app/log"
|
||||
_ "github.com/xtls/xray-core/app/metrics"
|
||||
_ "github.com/xtls/xray-core/app/policy"
|
||||
|
||||
Reference in New Issue
Block a user