Big refactor
This commit is contained in:
103
.github/workflows/binary-build.yml
vendored
103
.github/workflows/binary-build.yml
vendored
@@ -1,103 +0,0 @@
|
|||||||
name: Build zurg-testing executable binary
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
tags:
|
|
||||||
- v0*
|
|
||||||
- latest
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
determine_version:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
outputs:
|
|
||||||
build_version: ${{ steps.set_version.outputs.version }}
|
|
||||||
steps:
|
|
||||||
- id: set_version
|
|
||||||
run: |
|
|
||||||
CURRENT_DATE=$(date +'%Y%m%d%H%M')
|
|
||||||
echo "version=beta-${CURRENT_DATE}" >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
build:
|
|
||||||
name: Build
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
needs: determine_version
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
goos: [windows, darwin, linux]
|
|
||||||
goarch: [386, amd64, arm, arm64]
|
|
||||||
exclude:
|
|
||||||
- goos: darwin
|
|
||||||
goarch: 386
|
|
||||||
- goos: darwin
|
|
||||||
goarch: arm
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Check out code
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Setup Go
|
|
||||||
uses: actions/setup-go@v4
|
|
||||||
with:
|
|
||||||
go-version: 'stable'
|
|
||||||
|
|
||||||
- name: Build
|
|
||||||
run: |
|
|
||||||
CGO_ENABLED=0 GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} go build -ldflags="-s -w" -o zurg-${{ needs.determine_version.outputs.build_version }}-${{ matrix.goos }}-${{ matrix.goarch }} cmd/zurg/main.go
|
|
||||||
|
|
||||||
# Install and use UPX to compress the binary, but exclude specific combinations
|
|
||||||
- name: Install and Compress with UPX
|
|
||||||
if: (matrix.goos != 'windows' || matrix.goarch != 'arm') && (matrix.goos != 'windows' || matrix.goarch != 'arm64')
|
|
||||||
run: |
|
|
||||||
sudo apt-get update
|
|
||||||
sudo apt-get install -y upx-ucl
|
|
||||||
upx --best zurg-${{ needs.determine_version.outputs.build_version }}-${{ matrix.goos }}-${{ matrix.goarch }}
|
|
||||||
|
|
||||||
# Zip the binary
|
|
||||||
- name: Zip Binary
|
|
||||||
run: |
|
|
||||||
zip zurg-${{ needs.determine_version.outputs.build_version }}-${{ matrix.goos }}-${{ matrix.goarch }}.zip zurg-${{ needs.determine_version.outputs.build_version }}-${{ matrix.goos }}-${{ matrix.goarch }}
|
|
||||||
|
|
||||||
- name: List files
|
|
||||||
run: ls -alh
|
|
||||||
|
|
||||||
- name: Upload Artifacts
|
|
||||||
uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: zurg-${{ needs.determine_version.outputs.build_version }}-${{ matrix.goos }}-${{ matrix.goarch }}.zip
|
|
||||||
path: zurg-${{ needs.determine_version.outputs.build_version }}-${{ matrix.goos }}-${{ matrix.goarch }}.zip
|
|
||||||
|
|
||||||
release:
|
|
||||||
needs:
|
|
||||||
- determine_version
|
|
||||||
- build
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Download all artifacts
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
|
|
||||||
- name: Release
|
|
||||||
uses: softprops/action-gh-release@v1
|
|
||||||
with:
|
|
||||||
name: ${{ needs.determine_version.outputs.build_version }}
|
|
||||||
tag_name: ${{ needs.determine_version.outputs.build_version }}
|
|
||||||
generate_release_notes: true
|
|
||||||
files: |
|
|
||||||
./zurg-${{ needs.determine_version.outputs.build_version }}-darwin-amd64.zip
|
|
||||||
./zurg-${{ needs.determine_version.outputs.build_version }}-darwin-arm64.zip
|
|
||||||
./zurg-${{ needs.determine_version.outputs.build_version }}-linux-386.zip
|
|
||||||
./zurg-${{ needs.determine_version.outputs.build_version }}-linux-amd64.zip
|
|
||||||
./zurg-${{ needs.determine_version.outputs.build_version }}-linux-arm.zip
|
|
||||||
./zurg-${{ needs.determine_version.outputs.build_version }}-linux-arm64.zip
|
|
||||||
./zurg-${{ needs.determine_version.outputs.build_version }}-windows-386.zip
|
|
||||||
./zurg-${{ needs.determine_version.outputs.build_version }}-windows-amd64.zip
|
|
||||||
./zurg-${{ needs.determine_version.outputs.build_version }}-windows-arm.zip
|
|
||||||
./zurg-${{ needs.determine_version.outputs.build_version }}-windows-arm64.zip
|
|
||||||
token: ${{ secrets.PAT }}
|
|
||||||
env:
|
|
||||||
GITHUB_REPOSITORY: debridmediamanager/zurg-testing
|
|
||||||
|
|
||||||
- name: List files
|
|
||||||
run: ls -alh
|
|
||||||
40
.github/workflows/build.yml
vendored
Normal file
40
.github/workflows/build.yml
vendored
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
name: build
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ '**' ]
|
||||||
|
pull_request:
|
||||||
|
branches: [ '**' ]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
platform: [ubuntu-latest]
|
||||||
|
go-version: [1.21.3]
|
||||||
|
name: Build
|
||||||
|
runs-on: ${{ matrix.platform }}
|
||||||
|
steps:
|
||||||
|
- name: Setup Go
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: ${{ matrix.go-version }}
|
||||||
|
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
docker pull techknowlogick/xgo:latest
|
||||||
|
go install src.techknowlogick.com/xgo@latest
|
||||||
|
sudo apt install upx
|
||||||
|
|
||||||
|
- name: Build
|
||||||
|
run: |
|
||||||
|
bash build.sh dev
|
||||||
|
|
||||||
|
- name: Upload artifact
|
||||||
|
uses: actions/upload-artifact@v3
|
||||||
|
with:
|
||||||
|
name: zurg
|
||||||
|
path: dist
|
||||||
23
Dockerfile
23
Dockerfile
@@ -6,14 +6,15 @@ ARG GOARCH=amd64
|
|||||||
FROM golang:1-alpine AS builder
|
FROM golang:1-alpine AS builder
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
COPY . .
|
COPY . .
|
||||||
RUN CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go build -ldflags="-s -w" -o zurg cmd/zurg/main.go
|
RUN apk add --no-cache bash git go gcc musl-dev curl fuse
|
||||||
|
RUN go build -o zurg cmd/zurg/main.go
|
||||||
|
|
||||||
# Obfuscation stage
|
# Obfuscation stage
|
||||||
FROM alpine:3 AS obfuscator
|
FROM alpine:3 AS obfuscator
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
COPY --from=builder /app/zurg .
|
COPY --from=builder /app/zurg .
|
||||||
# RUN apk add --no-cache upx
|
RUN apk add --no-cache upx
|
||||||
# RUN upx --brute zurg
|
RUN upx --brute zurg
|
||||||
# Create a health check script that extracts the port from the config file
|
# Create a health check script that extracts the port from the config file
|
||||||
RUN echo $'#!/bin/sh\n\
|
RUN echo $'#!/bin/sh\n\
|
||||||
port=$(yaml read /app/config.yml port)\n\
|
port=$(yaml read /app/config.yml port)\n\
|
||||||
@@ -24,19 +25,6 @@ nc -z localhost $port || exit 1' > /app/healthcheck.sh && \
|
|||||||
FROM alpine:3
|
FROM alpine:3
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
# Accept UID and GID as build arguments with default values
|
|
||||||
ARG UID=1000
|
|
||||||
ARG GID=1000
|
|
||||||
|
|
||||||
# Add a group with the specified GID
|
|
||||||
RUN addgroup -g ${GID} appgroup
|
|
||||||
|
|
||||||
# Add a user with the specified UID and add to the group
|
|
||||||
RUN adduser -u ${UID} -D -G appgroup appuser
|
|
||||||
|
|
||||||
# Change the ownership of the /app directory to the appuser
|
|
||||||
RUN chown -R appuser:appgroup /app
|
|
||||||
|
|
||||||
# Copy the obfuscated binary from the obfuscator stage
|
# Copy the obfuscated binary from the obfuscator stage
|
||||||
COPY --from=obfuscator /app/zurg .
|
COPY --from=obfuscator /app/zurg .
|
||||||
COPY --from=obfuscator /app/healthcheck.sh .
|
COPY --from=obfuscator /app/healthcheck.sh .
|
||||||
@@ -48,9 +36,6 @@ COPY config.yml.example /app/config.yml
|
|||||||
RUN apk add --no-cache fuse3 netcat-openbsd yaml-cpp \
|
RUN apk add --no-cache fuse3 netcat-openbsd yaml-cpp \
|
||||||
&& echo 'user_allow_other' >> /etc/fuse.conf
|
&& echo 'user_allow_other' >> /etc/fuse.conf
|
||||||
|
|
||||||
# Use the non-root user to run the application
|
|
||||||
USER appuser
|
|
||||||
|
|
||||||
# Use the script for the health check
|
# Use the script for the health check
|
||||||
HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 CMD /app/healthcheck.sh
|
HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 CMD /app/healthcheck.sh
|
||||||
|
|
||||||
|
|||||||
109
build.sh
Executable file
109
build.sh
Executable file
@@ -0,0 +1,109 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
appName="zurg"
|
||||||
|
builtAt="$(date +'%F %T %z')"
|
||||||
|
goVersion=$(go version | sed 's/go version //')
|
||||||
|
gitCommit=$(git log --pretty=format:"%h" -1)
|
||||||
|
|
||||||
|
if [ "$1" = "dev" ]; then
|
||||||
|
version="dev"
|
||||||
|
else
|
||||||
|
version=$(git describe --abbrev=0 --tags)
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "app version: $version"
|
||||||
|
|
||||||
|
ldflags="\
|
||||||
|
-w -s \
|
||||||
|
-X 'github.com/debridmediamanager/zurg/internal/version.BuiltAt=$builtAt' \
|
||||||
|
-X 'github.com/debridmediamanager/zurg/internal/version.GoVersion=$goVersion' \
|
||||||
|
-X 'github.com/debridmediamanager/zurg/internal/version.GitCommit=$gitCommit' \
|
||||||
|
-X 'github.com/debridmediamanager/zurg/internal/version.Version=$version' \
|
||||||
|
"
|
||||||
|
|
||||||
|
BuildDev() {
|
||||||
|
# rm -rf .git/
|
||||||
|
xgo -targets=linux/amd64,windows/amd64,darwin/arm64 -out "$appName" -ldflags="$ldflags" ./cmd/zurg
|
||||||
|
mkdir -p "dist"
|
||||||
|
mv zurg-* dist
|
||||||
|
cd dist
|
||||||
|
upx -9 ./zurg-linux-amd64
|
||||||
|
upx -9 ./zurg-windows*
|
||||||
|
find . -type f -print0 | xargs -0 md5sum >md5.txt
|
||||||
|
cat md5.txt
|
||||||
|
}
|
||||||
|
|
||||||
|
BuildDocker() {
|
||||||
|
go build -o ./bin/zurg -ldflags="$ldflags" .
|
||||||
|
}
|
||||||
|
|
||||||
|
BuildRelease() {
|
||||||
|
# rm -rf .git/
|
||||||
|
mkdir -p "build"
|
||||||
|
muslflags="--extldflags '-static -fpic' $ldflags"
|
||||||
|
BASE="https://musl.nn.ci/"
|
||||||
|
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross arm-linux-musleabihf-cross mips-linux-musl-cross mips64-linux-musl-cross mips64el-linux-musl-cross mipsel-linux-musl-cross powerpc64le-linux-musl-cross s390x-linux-musl-cross)
|
||||||
|
for i in "${FILES[@]}"; do
|
||||||
|
url="${BASE}${i}.tgz"
|
||||||
|
curl -L -o "${i}.tgz" "${url}"
|
||||||
|
sudo tar xf "${i}.tgz" --strip-components 1 -C /usr/local
|
||||||
|
done
|
||||||
|
OS_ARCHES=(linux-musl-amd64 linux-musl-arm64 linux-musl-arm linux-musl-mips linux-musl-mips64 linux-musl-mips64le linux-musl-mipsle linux-musl-ppc64le linux-musl-s390x)
|
||||||
|
CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc arm-linux-musleabihf-gcc mips-linux-musl-gcc mips64-linux-musl-gcc mips64el-linux-musl-gcc mipsel-linux-musl-gcc powerpc64le-linux-musl-gcc s390x-linux-musl-gcc)
|
||||||
|
for i in "${!OS_ARCHES[@]}"; do
|
||||||
|
os_arch=${OS_ARCHES[$i]}
|
||||||
|
cgo_cc=${CGO_ARGS[$i]}
|
||||||
|
echo building for ${os_arch}
|
||||||
|
export GOOS=${os_arch%%-*}
|
||||||
|
export GOARCH=${os_arch##*-}
|
||||||
|
export CC=${cgo_cc}
|
||||||
|
export CGO_ENABLED=1
|
||||||
|
go build -o ./build/$appName-$os_arch -ldflags="$muslflags" .
|
||||||
|
done
|
||||||
|
xgo -out "$appName" -ldflags="$ldflags" .
|
||||||
|
# why? Because some target platforms seem to have issues with upx compression
|
||||||
|
upx -9 ./zurg-linux-amd64
|
||||||
|
upx -9 ./zurg-windows*
|
||||||
|
mv zurg-* build
|
||||||
|
}
|
||||||
|
|
||||||
|
MakeRelease() {
|
||||||
|
cd build
|
||||||
|
mkdir compress
|
||||||
|
for i in $(find . -type f -name "$appName-linux-*"); do
|
||||||
|
cp "$i" zurg
|
||||||
|
tar -czvf compress/"$i".tar.gz zurg
|
||||||
|
rm -f zurg
|
||||||
|
done
|
||||||
|
for i in $(find . -type f -name "$appName-darwin-*"); do
|
||||||
|
cp "$i" zurg
|
||||||
|
tar -czvf compress/"$i".tar.gz zurg
|
||||||
|
rm -f zurg
|
||||||
|
done
|
||||||
|
for i in $(find . -type f -name "$appName-windows-*"); do
|
||||||
|
cp "$i" zurg.exe
|
||||||
|
zip compress/$(echo $i | sed 's/\.[^.]*$//').zip zurg.exe
|
||||||
|
rm -f zurg.exe
|
||||||
|
done
|
||||||
|
cd compress
|
||||||
|
find . -type f -print0 | xargs -0 md5sum >md5.txt
|
||||||
|
cat md5.txt
|
||||||
|
cd ../..
|
||||||
|
}
|
||||||
|
|
||||||
|
if [ "$1" = "dev" ]; then
|
||||||
|
if [ "$2" = "docker" ]; then
|
||||||
|
BuildDocker
|
||||||
|
else
|
||||||
|
BuildDev
|
||||||
|
fi
|
||||||
|
elif [ "$1" = "release" ]; then
|
||||||
|
if [ "$2" = "docker" ]; then
|
||||||
|
BuildDocker
|
||||||
|
else
|
||||||
|
BuildRelease
|
||||||
|
MakeRelease
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo -e "Parameter error"
|
||||||
|
fi
|
||||||
@@ -6,24 +6,35 @@ import (
|
|||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
|
"runtime"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/debridmediamanager.com/zurg/internal/config"
|
"github.com/debridmediamanager.com/zurg/internal/config"
|
||||||
"github.com/debridmediamanager.com/zurg/internal/net"
|
"github.com/debridmediamanager.com/zurg/internal/net"
|
||||||
"github.com/debridmediamanager.com/zurg/internal/torrent"
|
"github.com/debridmediamanager.com/zurg/internal/torrent"
|
||||||
|
"github.com/debridmediamanager.com/zurg/internal/version"
|
||||||
"github.com/debridmediamanager.com/zurg/internal/zfs"
|
"github.com/debridmediamanager.com/zurg/internal/zfs"
|
||||||
|
"github.com/debridmediamanager.com/zurg/pkg/chunk"
|
||||||
"github.com/debridmediamanager.com/zurg/pkg/logutil"
|
"github.com/debridmediamanager.com/zurg/pkg/logutil"
|
||||||
"github.com/debridmediamanager.com/zurg/pkg/realdebrid"
|
"github.com/debridmediamanager.com/zurg/pkg/realdebrid"
|
||||||
"github.com/hashicorp/golang-lru/v2/expirable"
|
"github.com/hashicorp/golang-lru/v2/expirable"
|
||||||
|
"github.com/winfsp/cgofuse/fuse"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
if len(os.Args) > 1 && os.Args[1] == "networktest" {
|
// special commands
|
||||||
|
if len(os.Args) > 1 {
|
||||||
|
switch os.Args[1] {
|
||||||
|
case "version":
|
||||||
|
version.Show()
|
||||||
|
case "networktest":
|
||||||
realdebrid.RunTest()
|
realdebrid.RunTest()
|
||||||
return
|
}
|
||||||
|
os.Exit(0)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// normal startup
|
||||||
log := logutil.NewLogger().Named("zurg")
|
log := logutil.NewLogger().Named("zurg")
|
||||||
|
|
||||||
config, configErr := config.LoadZurgConfig("./config.yml")
|
config, configErr := config.LoadZurgConfig("./config.yml")
|
||||||
@@ -53,16 +64,26 @@ func main() {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// Start the mount in a goroutine with panic recovery.
|
log.Debugf("Initializing chunk manager, cores: %d", runtime.NumCPU())
|
||||||
mountPoint := config.GetMountPoint()
|
// 64kb request size
|
||||||
if _, err := os.Stat(mountPoint); os.IsNotExist(err) {
|
chunkMgr, err := chunk.NewManager(
|
||||||
if err := os.Mkdir(mountPoint, 0755); err != nil {
|
"",
|
||||||
log.Panicf("Failed to create mount point: %v", err)
|
1048576, // 1MB
|
||||||
}
|
1, // 1 chunk - load ahead (1MB total)
|
||||||
|
max(runtime.NumCPU()/2, 1), // check threads
|
||||||
|
max(runtime.NumCPU()/2, 1), // load threads
|
||||||
|
runtime.NumCPU()*2,
|
||||||
|
torrentMgr, // max chunks
|
||||||
|
config)
|
||||||
|
if nil != err {
|
||||||
|
log.Panicf("Failed to initialize chunk manager: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fs := zfs.NewZurgFS(torrentMgr, config, chunkMgr, logutil.NewLogger().Named("zfs"))
|
||||||
|
host := fuse.NewFileSystemHost(fs)
|
||||||
go func() {
|
go func() {
|
||||||
log.Infof("Mounting on %s", mountPoint)
|
log.Infof("Mounting on %s", config.GetMountPoint())
|
||||||
if err := zfs.Mount(mountPoint, config, torrentMgr); err != nil {
|
if err := zfs.Mount(host, config); err != nil {
|
||||||
log.Panicf("Failed to mount: %v", err)
|
log.Panicf("Failed to mount: %v", err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
@@ -75,7 +96,7 @@ func main() {
|
|||||||
if err := server.Shutdown(ctx); err != nil {
|
if err := server.Shutdown(ctx); err != nil {
|
||||||
log.Errorf("Server shutdown error: %v\n", err)
|
log.Errorf("Server shutdown error: %v\n", err)
|
||||||
}
|
}
|
||||||
if err := zfs.Unmount(mountPoint); err != nil {
|
if err := zfs.Unmount(host); err != nil {
|
||||||
log.Errorf("Unmount error: %v\n", err)
|
log.Errorf("Unmount error: %v\n", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
1
dist/md5.txt
vendored
Normal file
1
dist/md5.txt
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
d41d8cd98f00b204e9800998ecf8427e ./md5.txt
|
||||||
6
go.mod
6
go.mod
@@ -3,7 +3,6 @@ module github.com/debridmediamanager.com/zurg
|
|||||||
go 1.21.3
|
go 1.21.3
|
||||||
|
|
||||||
require (
|
require (
|
||||||
bazil.org/fuse v0.0.0-20230120002735-62a210ff1fd5
|
|
||||||
github.com/elliotchance/orderedmap/v2 v2.2.0
|
github.com/elliotchance/orderedmap/v2 v2.2.0
|
||||||
github.com/hashicorp/golang-lru/v2 v2.0.7
|
github.com/hashicorp/golang-lru/v2 v2.0.7
|
||||||
go.uber.org/zap v1.26.0
|
go.uber.org/zap v1.26.0
|
||||||
@@ -11,4 +10,7 @@ require (
|
|||||||
gopkg.in/yaml.v3 v3.0.1
|
gopkg.in/yaml.v3 v3.0.1
|
||||||
)
|
)
|
||||||
|
|
||||||
require go.uber.org/multierr v1.10.0 // indirect
|
require (
|
||||||
|
github.com/winfsp/cgofuse v1.5.0 // indirect
|
||||||
|
go.uber.org/multierr v1.10.0 // indirect
|
||||||
|
)
|
||||||
|
|||||||
2
go.sum
2
go.sum
@@ -12,6 +12,8 @@ github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKs
|
|||||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||||
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ=
|
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ=
|
||||||
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
|
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
|
||||||
|
github.com/winfsp/cgofuse v1.5.0 h1:MsBP7Mi/LiJf/7/F3O/7HjjR009ds6KCdqXzKpZSWxI=
|
||||||
|
github.com/winfsp/cgofuse v1.5.0/go.mod h1:h3awhoUOcn2VYVKCwDaYxSLlZwnyK+A8KaDoLUp2lbU=
|
||||||
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
|
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
|
||||||
go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
|
go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
|
||||||
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
|
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
|
||||||
|
|||||||
@@ -1,8 +1,10 @@
|
|||||||
package torrent
|
package torrent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bufio"
|
||||||
"encoding/gob"
|
"encoding/gob"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"hash/fnv"
|
||||||
"math"
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
@@ -19,6 +21,7 @@ import (
|
|||||||
|
|
||||||
type TorrentManager struct {
|
type TorrentManager struct {
|
||||||
config config.ConfigInterface
|
config config.ConfigInterface
|
||||||
|
DirectoryMap *orderedmap.OrderedMap[string, *orderedmap.OrderedMap[string, *Torrent]]
|
||||||
TorrentMap *orderedmap.OrderedMap[string, *Torrent] // accessKey -> Torrent
|
TorrentMap *orderedmap.OrderedMap[string, *Torrent] // accessKey -> Torrent
|
||||||
repairMap *orderedmap.OrderedMap[string, time.Time] // accessKey -> time last repaired
|
repairMap *orderedmap.OrderedMap[string, time.Time] // accessKey -> time last repaired
|
||||||
requiredVersion string
|
requiredVersion string
|
||||||
@@ -35,6 +38,7 @@ type TorrentManager struct {
|
|||||||
func NewTorrentManager(config config.ConfigInterface, api *realdebrid.RealDebrid) *TorrentManager {
|
func NewTorrentManager(config config.ConfigInterface, api *realdebrid.RealDebrid) *TorrentManager {
|
||||||
t := &TorrentManager{
|
t := &TorrentManager{
|
||||||
config: config,
|
config: config,
|
||||||
|
DirectoryMap: orderedmap.NewOrderedMap[string, *orderedmap.OrderedMap[string, *Torrent]](),
|
||||||
TorrentMap: orderedmap.NewOrderedMap[string, *Torrent](),
|
TorrentMap: orderedmap.NewOrderedMap[string, *Torrent](),
|
||||||
repairMap: orderedmap.NewOrderedMap[string, time.Time](),
|
repairMap: orderedmap.NewOrderedMap[string, time.Time](),
|
||||||
requiredVersion: "10.11.2023",
|
requiredVersion: "10.11.2023",
|
||||||
@@ -44,8 +48,6 @@ func NewTorrentManager(config config.ConfigInterface, api *realdebrid.RealDebrid
|
|||||||
log: logutil.NewLogger().Named("manager"),
|
log: logutil.NewLogger().Named("manager"),
|
||||||
}
|
}
|
||||||
|
|
||||||
t.mu.Lock()
|
|
||||||
|
|
||||||
newTorrents, _, err := t.api.GetTorrents(0)
|
newTorrents, _, err := t.api.GetTorrents(0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.log.Fatalf("Cannot get torrents: %v\n", err)
|
t.log.Fatalf("Cannot get torrents: %v\n", err)
|
||||||
@@ -62,28 +64,34 @@ func NewTorrentManager(config config.ConfigInterface, api *realdebrid.RealDebrid
|
|||||||
<-t.workerPool
|
<-t.workerPool
|
||||||
}(i)
|
}(i)
|
||||||
}
|
}
|
||||||
t.log.Infof("Got %d torrents", len(newTorrents))
|
t.log.Infof("Received %d torrents", len(newTorrents))
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
|
t.log.Infof("Fetched info for %d torrents", len(newTorrents))
|
||||||
close(torrentsChan)
|
close(torrentsChan)
|
||||||
|
count := 0
|
||||||
for newTorrent := range torrentsChan {
|
for newTorrent := range torrentsChan {
|
||||||
if newTorrent == nil {
|
if newTorrent == nil {
|
||||||
|
count++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
torrent, _ := t.TorrentMap.Get(newTorrent.AccessKey)
|
torrent, _ := t.TorrentMap.Get(newTorrent.AccessKey)
|
||||||
if torrent != nil {
|
if torrent != nil {
|
||||||
|
t.mu.Lock()
|
||||||
t.TorrentMap.Set(newTorrent.AccessKey, t.mergeToMain(torrent, newTorrent))
|
t.TorrentMap.Set(newTorrent.AccessKey, t.mergeToMain(torrent, newTorrent))
|
||||||
} else {
|
|
||||||
t.TorrentMap.Set(newTorrent.AccessKey, newTorrent)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
t.log.Infof("Compiled to %d unique movies and shows", t.TorrentMap.Len())
|
|
||||||
t.checksum = t.getChecksum()
|
|
||||||
t.mu.Unlock()
|
t.mu.Unlock()
|
||||||
|
} else {
|
||||||
|
t.mu.Lock()
|
||||||
|
t.TorrentMap.Set(newTorrent.AccessKey, newTorrent)
|
||||||
|
t.mu.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
t.log.Infof("Compiled all torrents to %d unique movies and shows, %d were missing info", t.TorrentMap.Len(), count)
|
||||||
|
t.checksum = t.getChecksum()
|
||||||
|
|
||||||
if t.config.EnableRepair() {
|
if t.config.EnableRepair() {
|
||||||
go t.repairAll()
|
go t.repairAll()
|
||||||
}
|
}
|
||||||
go t.startRefreshJob()
|
// go t.startRefreshJob()
|
||||||
|
|
||||||
return t
|
return t
|
||||||
}
|
}
|
||||||
@@ -194,8 +202,6 @@ func (t *TorrentManager) startRefreshJob() {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
t.mu.Lock()
|
|
||||||
|
|
||||||
newTorrents, _, err := t.api.GetTorrents(0)
|
newTorrents, _, err := t.api.GetTorrents(0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.log.Warnf("Cannot get torrents: %v\n", err)
|
t.log.Warnf("Cannot get torrents: %v\n", err)
|
||||||
@@ -231,6 +237,15 @@ func (t *TorrentManager) startRefreshJob() {
|
|||||||
}
|
}
|
||||||
for _, accessKey := range toDelete {
|
for _, accessKey := range toDelete {
|
||||||
t.TorrentMap.Delete(accessKey)
|
t.TorrentMap.Delete(accessKey)
|
||||||
|
for el := t.DirectoryMap.Front(); el != nil; el = el.Next() {
|
||||||
|
torrents := el.Value
|
||||||
|
for el2 := torrents.Front(); el2 != nil; el2 = el2.Next() {
|
||||||
|
if el2.Key == accessKey {
|
||||||
|
torrents.Delete(accessKey)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
@@ -241,13 +256,16 @@ func (t *TorrentManager) startRefreshJob() {
|
|||||||
}
|
}
|
||||||
torrent, _ := t.TorrentMap.Get(newTorrent.AccessKey)
|
torrent, _ := t.TorrentMap.Get(newTorrent.AccessKey)
|
||||||
if torrent != nil {
|
if torrent != nil {
|
||||||
|
t.mu.Lock()
|
||||||
t.TorrentMap.Set(newTorrent.AccessKey, t.mergeToMain(torrent, newTorrent))
|
t.TorrentMap.Set(newTorrent.AccessKey, t.mergeToMain(torrent, newTorrent))
|
||||||
|
t.mu.Unlock()
|
||||||
} else {
|
} else {
|
||||||
|
t.mu.Lock()
|
||||||
t.TorrentMap.Set(newTorrent.AccessKey, newTorrent)
|
t.TorrentMap.Set(newTorrent.AccessKey, newTorrent)
|
||||||
|
t.mu.Unlock()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
t.checksum = t.getChecksum()
|
t.checksum = t.getChecksum()
|
||||||
t.mu.Unlock()
|
|
||||||
|
|
||||||
if t.config.EnableRepair() {
|
if t.config.EnableRepair() {
|
||||||
go t.repairAll()
|
go t.repairAll()
|
||||||
@@ -296,6 +314,7 @@ func (t *TorrentManager) getMoreInfo(rdTorrent realdebrid.Torrent) *Torrent {
|
|||||||
File: file,
|
File: file,
|
||||||
Added: info.Added,
|
Added: info.Added,
|
||||||
Link: "", // no link yet
|
Link: "", // no link yet
|
||||||
|
ZurgFS: hashStringToFh(file.Path + info.Hash),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
if selectedFiles.Len() > len(info.Links) && info.Progress == 100 {
|
if selectedFiles.Len() > len(info.Links) && info.Progress == 100 {
|
||||||
@@ -304,7 +323,7 @@ func (t *TorrentManager) getMoreInfo(rdTorrent realdebrid.Torrent) *Torrent {
|
|||||||
var isChaotic bool
|
var isChaotic bool
|
||||||
selectedFiles, isChaotic = t.organizeChaos(info.Links, selectedFiles)
|
selectedFiles, isChaotic = t.organizeChaos(info.Links, selectedFiles)
|
||||||
if isChaotic {
|
if isChaotic {
|
||||||
// t.log.Warnf("Torrent id=%s %s is unfixable, it is always returning an unstreamable link (it is no longer shown in your directories)", info.ID, info.Name)
|
t.log.Warnf("Torrent id=%s %s is unrepairable, it is always returning a rar file (it will no longer show up in your directories)", info.ID, info.Name)
|
||||||
// t.log.Debugf("You can try fixing it yourself magnet:?xt=urn:btih:%s", info.Hash)
|
// t.log.Debugf("You can try fixing it yourself magnet:?xt=urn:btih:%s", info.Hash)
|
||||||
return nil
|
return nil
|
||||||
} else {
|
} else {
|
||||||
@@ -316,7 +335,7 @@ func (t *TorrentManager) getMoreInfo(rdTorrent realdebrid.Torrent) *Torrent {
|
|||||||
t.log.Infof("Torrent id=%s %s marked for repair", info.ID, info.Name)
|
t.log.Infof("Torrent id=%s %s marked for repair", info.ID, info.Name)
|
||||||
forRepair = true
|
forRepair = true
|
||||||
} else {
|
} else {
|
||||||
// t.log.Warnf("Torrent id=%s %s is unfixable, the lone streamable link has expired (it is no longer shown in your directories)", info.ID, info.Name)
|
t.log.Warnf("Torrent id=%s %s is unrepairable, the lone streamable link has expired (it will no longer show up in your directories)", info.ID, info.Name)
|
||||||
// t.log.Debugf("You can try fixing it yourself magnet:?xt=urn:btih:%s", info.Hash)
|
// t.log.Debugf("You can try fixing it yourself magnet:?xt=urn:btih:%s", info.Hash)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -344,12 +363,31 @@ func (t *TorrentManager) getMoreInfo(rdTorrent realdebrid.Torrent) *Torrent {
|
|||||||
InProgress: info.Progress != 100,
|
InProgress: info.Progress != 100,
|
||||||
Instances: []realdebrid.TorrentInfo{*info},
|
Instances: []realdebrid.TorrentInfo{*info},
|
||||||
}
|
}
|
||||||
|
for _, directory := range torrent.Directories {
|
||||||
|
if _, ok := t.DirectoryMap.Get(directory); !ok {
|
||||||
|
newMap := orderedmap.NewOrderedMap[string, *Torrent]()
|
||||||
|
t.mu.Lock()
|
||||||
|
t.DirectoryMap.Set(directory, newMap)
|
||||||
|
t.mu.Unlock()
|
||||||
|
} else {
|
||||||
|
torrents, _ := t.DirectoryMap.Get(directory)
|
||||||
|
t.mu.Lock()
|
||||||
|
torrents.Set(torrent.AccessKey, &torrent)
|
||||||
|
t.mu.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
if selectedFiles.Len() > 0 && torrentFromFile == nil {
|
if selectedFiles.Len() > 0 && torrentFromFile == nil {
|
||||||
t.writeToFile(info) // only when there are selected files, else it's useless
|
t.writeToFile(info) // only when there are selected files, else it's useless
|
||||||
}
|
}
|
||||||
return &torrent
|
return &torrent
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func hashStringToFh(s string) (fh uint64) {
|
||||||
|
hasher := fnv.New64a()
|
||||||
|
_, _ = hasher.Write([]byte(s)) // Write the string to the hasher; ignoring error as it never returns an error
|
||||||
|
return hasher.Sum64() // Returns a 64-bit hash value
|
||||||
|
}
|
||||||
|
|
||||||
func (t *TorrentManager) getName(name, originalName string) string {
|
func (t *TorrentManager) getName(name, originalName string) string {
|
||||||
// drop the extension from the name
|
// drop the extension from the name
|
||||||
if t.config.EnableRetainFolderNameExtension() && strings.Contains(name, originalName) {
|
if t.config.EnableRetainFolderNameExtension() && strings.Contains(name, originalName) {
|
||||||
@@ -393,41 +431,40 @@ func (t *TorrentManager) getDirectories(torrent *realdebrid.TorrentInfo) []strin
|
|||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *TorrentManager) writeToFile(torrent *realdebrid.TorrentInfo) {
|
func (t *TorrentManager) writeToFile(torrent *realdebrid.TorrentInfo) error {
|
||||||
filePath := fmt.Sprintf("data/%s.bin", torrent.ID)
|
filePath := "data/" + torrent.ID + ".bin"
|
||||||
file, err := os.Create(filePath)
|
file, err := os.Create(filePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.log.Fatalf("Failed creating file: %s", err)
|
return fmt.Errorf("failed creating file: %w", err)
|
||||||
return
|
|
||||||
}
|
}
|
||||||
defer file.Close()
|
defer file.Close()
|
||||||
|
|
||||||
|
w := bufio.NewWriter(file)
|
||||||
|
defer w.Flush()
|
||||||
|
|
||||||
torrent.Version = t.requiredVersion
|
torrent.Version = t.requiredVersion
|
||||||
dataEncoder := gob.NewEncoder(file)
|
dataEncoder := gob.NewEncoder(w)
|
||||||
dataEncoder.Encode(torrent)
|
if err := dataEncoder.Encode(torrent); err != nil {
|
||||||
|
return fmt.Errorf("failed encoding torrent: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *TorrentManager) readFromFile(torrentID string) *realdebrid.TorrentInfo {
|
func (t *TorrentManager) readFromFile(torrentID string) *realdebrid.TorrentInfo {
|
||||||
filePath := fmt.Sprintf("data/%s.bin", torrentID)
|
filePath := "data/" + torrentID + ".bin"
|
||||||
fileInfo, err := os.Stat(filePath)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if time.Since(fileInfo.ModTime()) > time.Duration(t.config.GetCacheTimeHours())*time.Hour {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
file, err := os.Open(filePath)
|
file, err := os.Open(filePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
defer file.Close()
|
defer file.Close()
|
||||||
|
|
||||||
|
r := bufio.NewReader(file)
|
||||||
var torrent realdebrid.TorrentInfo
|
var torrent realdebrid.TorrentInfo
|
||||||
dataDecoder := gob.NewDecoder(file)
|
dataDecoder := gob.NewDecoder(r)
|
||||||
err = dataDecoder.Decode(&torrent)
|
if err := dataDecoder.Decode(&torrent); err != nil {
|
||||||
if err != nil {
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if torrent.Version != t.requiredVersion {
|
if torrent.Version != t.requiredVersion {
|
||||||
@@ -484,6 +521,7 @@ func (t *TorrentManager) organizeChaos(links []string, selectedFiles *orderedmap
|
|||||||
},
|
},
|
||||||
Added: time.Now().Format(time.RFC3339),
|
Added: time.Now().Format(time.RFC3339),
|
||||||
Link: result.Response.Link,
|
Link: result.Response.Link,
|
||||||
|
ZurgFS: hashStringToFh(result.Response.Filename),
|
||||||
})
|
})
|
||||||
} else {
|
} else {
|
||||||
isChaotic = true
|
isChaotic = true
|
||||||
@@ -495,6 +533,7 @@ func (t *TorrentManager) organizeChaos(links []string, selectedFiles *orderedmap
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (t *TorrentManager) repairAll() {
|
func (t *TorrentManager) repairAll() {
|
||||||
|
t.log.Info("Checking for torrents to repair")
|
||||||
// side note: iteration works!
|
// side note: iteration works!
|
||||||
for el := t.TorrentMap.Front(); el != nil; el = el.Next() {
|
for el := t.TorrentMap.Front(); el != nil; el = el.Next() {
|
||||||
torrent := el.Value
|
torrent := el.Value
|
||||||
@@ -515,13 +554,15 @@ func (t *TorrentManager) repairAll() {
|
|||||||
if !forRepair {
|
if !forRepair {
|
||||||
// if it was marked for repair, unmark it
|
// if it was marked for repair, unmark it
|
||||||
torrent.ForRepair = false
|
torrent.ForRepair = false
|
||||||
|
t.mu.Lock()
|
||||||
t.TorrentMap.Set(torrent.AccessKey, torrent)
|
t.TorrentMap.Set(torrent.AccessKey, torrent)
|
||||||
|
t.mu.Unlock()
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// when getting info, we mark it for repair if it's missing some links
|
// when getting info, we mark it for repair if it's missing some links
|
||||||
if torrent.ForRepair {
|
if torrent.ForRepair {
|
||||||
t.log.Infof("Found torrent for repair %s", torrent.AccessKey)
|
t.log.Infof("Found torrent for repair: %s", torrent.AccessKey)
|
||||||
t.Repair(torrent.AccessKey)
|
t.Repair(torrent.AccessKey)
|
||||||
break // only repair the first one for repair and then move on
|
break // only repair the first one for repair and then move on
|
||||||
}
|
}
|
||||||
@@ -534,7 +575,9 @@ func (t *TorrentManager) Repair(accessKey string) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
t.mu.Lock()
|
||||||
t.repairMap.Set(accessKey, time.Now())
|
t.repairMap.Set(accessKey, time.Now())
|
||||||
|
t.mu.Unlock()
|
||||||
|
|
||||||
if !t.config.EnableRepair() {
|
if !t.config.EnableRepair() {
|
||||||
t.log.Warn("Repair is disabled; if you do not have other zurg instances running, you should enable repair")
|
t.log.Warn("Repair is disabled; if you do not have other zurg instances running, you should enable repair")
|
||||||
@@ -569,7 +612,9 @@ func (t *TorrentManager) Repair(accessKey string) {
|
|||||||
}
|
}
|
||||||
selectedFiles, _ := t.organizeChaos(links, torrent.SelectedFiles)
|
selectedFiles, _ := t.organizeChaos(links, torrent.SelectedFiles)
|
||||||
torrent.SelectedFiles = selectedFiles
|
torrent.SelectedFiles = selectedFiles
|
||||||
|
t.mu.Lock()
|
||||||
t.TorrentMap.Set(torrent.AccessKey, torrent)
|
t.TorrentMap.Set(torrent.AccessKey, torrent)
|
||||||
|
t.mu.Unlock()
|
||||||
|
|
||||||
// first solution: add the same selection, maybe it can be fixed by reinsertion?
|
// first solution: add the same selection, maybe it can be fixed by reinsertion?
|
||||||
if t.reinsertTorrent(torrent, "") {
|
if t.reinsertTorrent(torrent, "") {
|
||||||
|
|||||||
@@ -20,4 +20,5 @@ type File struct {
|
|||||||
realdebrid.File
|
realdebrid.File
|
||||||
Added string
|
Added string
|
||||||
Link string
|
Link string
|
||||||
|
ZurgFS uint64
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -160,8 +160,8 @@ func createErrorFile(path, link string) *intTor.File {
|
|||||||
return &ret
|
return &ret
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetFileReader(torrent *intTor.Torrent, file *intTor.File, offset int64, size int, t *intTor.TorrentManager, c config.ConfigInterface, log *zap.SugaredLogger) io.ReadCloser {
|
func GetFileReader(torrent *intTor.Torrent, file *intTor.File, offset int64, size int, torMgr *intTor.TorrentManager, cfg config.ConfigInterface, log *zap.SugaredLogger) []byte {
|
||||||
unres := t.UnrestrictUntilOk(file.Link)
|
unres := torMgr.UnrestrictUntilOk(file.Link)
|
||||||
if unres == nil {
|
if unres == nil {
|
||||||
if strings.Contains(file.Link, "www.youtube.com") {
|
if strings.Contains(file.Link, "www.youtube.com") {
|
||||||
log.Errorf("Even the error page is broken! Sorry!")
|
log.Errorf("Even the error page is broken! Sorry!")
|
||||||
@@ -169,10 +169,10 @@ func GetFileReader(torrent *intTor.Torrent, file *intTor.File, offset int64, siz
|
|||||||
}
|
}
|
||||||
log.Warnf("File %s is no longer available, torrent is marked for repair", file.Path)
|
log.Warnf("File %s is no longer available, torrent is marked for repair", file.Path)
|
||||||
if torrent != nil {
|
if torrent != nil {
|
||||||
go t.Repair(torrent.AccessKey)
|
go torMgr.Repair(torrent.AccessKey)
|
||||||
}
|
}
|
||||||
errFile := createErrorFile("unavailable.mp4", "https://www.youtube.com/watch?v=gea_FJrtFVA")
|
errFile := createErrorFile("unavailable.mp4", "https://www.youtube.com/watch?v=gea_FJrtFVA")
|
||||||
return GetFileReader(nil, errFile, 0, 0, t, c, log)
|
return GetFileReader(nil, errFile, 0, 0, torMgr, cfg, log)
|
||||||
}
|
}
|
||||||
|
|
||||||
req, err := http.NewRequest(http.MethodGet, unres.Download, nil)
|
req, err := http.NewRequest(http.MethodGet, unres.Download, nil)
|
||||||
@@ -183,15 +183,15 @@ func GetFileReader(torrent *intTor.Torrent, file *intTor.File, offset int64, siz
|
|||||||
}
|
}
|
||||||
log.Errorf("Error creating new request: %v", err)
|
log.Errorf("Error creating new request: %v", err)
|
||||||
errFile := createErrorFile("new_request.mp4", "https://www.youtube.com/watch?v=H3NSrObyAxM")
|
errFile := createErrorFile("new_request.mp4", "https://www.youtube.com/watch?v=H3NSrObyAxM")
|
||||||
return GetFileReader(nil, errFile, 0, 0, t, c, log)
|
return GetFileReader(nil, errFile, 0, 0, torMgr, cfg, log)
|
||||||
}
|
}
|
||||||
|
|
||||||
if size == 0 {
|
if size == 0 {
|
||||||
size = int(file.Bytes)
|
size = int(file.Bytes)
|
||||||
}
|
}
|
||||||
req.Header.Add("Range", fmt.Sprintf("bytes=%v-%v", offset, size-1))
|
req.Header.Add("Range", fmt.Sprintf("bytes=%v-%v", offset, offset+int64(size)-1))
|
||||||
|
|
||||||
client := zurghttp.NewHTTPClient(c.GetToken(), 10, c)
|
client := zurghttp.NewHTTPClient(cfg.GetToken(), 10, cfg)
|
||||||
resp, err := client.Do(req)
|
resp, err := client.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if strings.Contains(file.Link, "www.youtube.com") {
|
if strings.Contains(file.Link, "www.youtube.com") {
|
||||||
@@ -200,10 +200,10 @@ func GetFileReader(torrent *intTor.Torrent, file *intTor.File, offset int64, siz
|
|||||||
}
|
}
|
||||||
log.Warnf("Cannot download file %v ; torrent is marked for repair", err)
|
log.Warnf("Cannot download file %v ; torrent is marked for repair", err)
|
||||||
if torrent != nil {
|
if torrent != nil {
|
||||||
go t.Repair(torrent.AccessKey)
|
go torMgr.Repair(torrent.AccessKey)
|
||||||
}
|
}
|
||||||
errFile := createErrorFile("cannot_download.mp4", "https://www.youtube.com/watch?v=FSSd8cponAA")
|
errFile := createErrorFile("cannot_download.mp4", "https://www.youtube.com/watch?v=FSSd8cponAA")
|
||||||
return GetFileReader(nil, errFile, 0, 0, t, c, log)
|
return GetFileReader(nil, errFile, 0, 0, torMgr, cfg, log)
|
||||||
}
|
}
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
|
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
|
||||||
@@ -213,11 +213,19 @@ func GetFileReader(torrent *intTor.Torrent, file *intTor.File, offset int64, siz
|
|||||||
}
|
}
|
||||||
log.Warnf("Received a %s status code ; torrent is marked for repair", resp.Status)
|
log.Warnf("Received a %s status code ; torrent is marked for repair", resp.Status)
|
||||||
if torrent != nil {
|
if torrent != nil {
|
||||||
go t.Repair(torrent.AccessKey)
|
go torMgr.Repair(torrent.AccessKey)
|
||||||
}
|
}
|
||||||
errFile := createErrorFile("not_ok_status.mp4", "https://www.youtube.com/watch?v=BcseUxviVqE")
|
errFile := createErrorFile("not_ok_status.mp4", "https://www.youtube.com/watch?v=BcseUxviVqE")
|
||||||
return GetFileReader(nil, errFile, 0, 0, t, c, log)
|
return GetFileReader(nil, errFile, 0, 0, torMgr, cfg, log)
|
||||||
}
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
return resp.Body
|
requestedBytes, err := io.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
if err != io.EOF {
|
||||||
|
log.Errorf("Error reading bytes: %v", err)
|
||||||
|
errFile := createErrorFile("read_error.mp4", "https://www.youtube.com/watch?v=t9VgOriBHwE")
|
||||||
|
return GetFileReader(nil, errFile, 0, 0, torMgr, cfg, log)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return requestedBytes
|
||||||
}
|
}
|
||||||
|
|||||||
17
internal/version/version.go
Normal file
17
internal/version/version.go
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
package version
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
BuiltAt string
|
||||||
|
GoVersion string
|
||||||
|
GitCommit string
|
||||||
|
Version string = "dev"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Show() {
|
||||||
|
fmt.Printf("zurg\nBuilt At: %s\nGo Version: %s\nCommit: %s\nVersion: %s\n",
|
||||||
|
BuiltAt, GoVersion, GitCommit, Version)
|
||||||
|
}
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
package zfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"bazil.org/fuse/fs"
|
|
||||||
"github.com/debridmediamanager.com/zurg/internal/config"
|
|
||||||
"github.com/debridmediamanager.com/zurg/internal/torrent"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
type FS struct {
|
|
||||||
uid uint32
|
|
||||||
gid uint32
|
|
||||||
umask os.FileMode
|
|
||||||
directIO bool
|
|
||||||
lock sync.RWMutex
|
|
||||||
config config.ConfigInterface
|
|
||||||
tMgr *torrent.TorrentManager
|
|
||||||
log *zap.SugaredLogger
|
|
||||||
initTime time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// Root returns the root path
|
|
||||||
func (f *FS) Root() (fs.Node, error) {
|
|
||||||
return Object{
|
|
||||||
fs: f,
|
|
||||||
objType: ROOT,
|
|
||||||
mtime: f.initTime,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
@@ -1,54 +1,18 @@
|
|||||||
package zfs
|
package zfs
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"bazil.org/fuse"
|
|
||||||
"bazil.org/fuse/fs"
|
|
||||||
"github.com/debridmediamanager.com/zurg/internal/config"
|
"github.com/debridmediamanager.com/zurg/internal/config"
|
||||||
"github.com/debridmediamanager.com/zurg/internal/torrent"
|
"github.com/winfsp/cgofuse/fuse"
|
||||||
"github.com/debridmediamanager.com/zurg/pkg/logutil"
|
|
||||||
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func Mount(mountpoint string, cfg config.ConfigInterface, tMgr *torrent.TorrentManager) error {
|
func Mount(host *fuse.FileSystemHost, cfg config.ConfigInterface) error {
|
||||||
log := logutil.NewLogger().Named("zfs")
|
host.SetCapCaseInsensitive(false)
|
||||||
|
host.SetCapReaddirPlus(false)
|
||||||
options := []fuse.MountOption{
|
host.Mount(cfg.GetMountPoint(), []string{})
|
||||||
fuse.AllowOther(),
|
|
||||||
fuse.AllowNonEmptyMount(),
|
|
||||||
fuse.MaxReadahead(uint32(128 << 10)),
|
|
||||||
fuse.DefaultPermissions(),
|
|
||||||
fuse.FSName("zurgfs"),
|
|
||||||
}
|
|
||||||
conn, err := fuse.Mount(mountpoint, options...)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer conn.Close()
|
|
||||||
|
|
||||||
srv := fs.New(conn, nil)
|
|
||||||
|
|
||||||
filesys := &FS{
|
|
||||||
uid: uint32(unix.Geteuid()),
|
|
||||||
gid: uint32(unix.Getegid()),
|
|
||||||
umask: os.FileMode(0),
|
|
||||||
config: cfg,
|
|
||||||
tMgr: tMgr,
|
|
||||||
log: log,
|
|
||||||
initTime: time.Now(),
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := srv.Serve(filesys); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func Unmount(mountpoint string) error {
|
func Unmount(host *fuse.FileSystemHost) error {
|
||||||
fuse.Unmount(mountpoint)
|
_ = host.Unmount()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,188 +0,0 @@
|
|||||||
package zfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"bazil.org/fuse"
|
|
||||||
"bazil.org/fuse/fs"
|
|
||||||
"github.com/debridmediamanager.com/zurg/internal/torrent"
|
|
||||||
"github.com/debridmediamanager.com/zurg/internal/universal"
|
|
||||||
)
|
|
||||||
|
|
||||||
// define variable as rootObject id
|
|
||||||
const (
|
|
||||||
ROOT = 0
|
|
||||||
DIRECTORY = 1
|
|
||||||
TORRENT = 2
|
|
||||||
FILE = 3
|
|
||||||
)
|
|
||||||
|
|
||||||
type Object struct {
|
|
||||||
fs *FS
|
|
||||||
objType int
|
|
||||||
parentName string
|
|
||||||
name string
|
|
||||||
torrent *torrent.Torrent
|
|
||||||
file *torrent.File
|
|
||||||
size uint64
|
|
||||||
mtime time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attr returns the attributes for a directory
|
|
||||||
func (o Object) Attr(ctx context.Context, attr *fuse.Attr) error {
|
|
||||||
if o.objType == FILE {
|
|
||||||
attr.Mode = 0644
|
|
||||||
} else {
|
|
||||||
attr.Mode = os.ModeDir | 0755
|
|
||||||
}
|
|
||||||
attr.Size = o.size
|
|
||||||
|
|
||||||
attr.Uid = o.fs.uid
|
|
||||||
attr.Gid = o.fs.gid
|
|
||||||
|
|
||||||
attr.Ctime = o.mtime
|
|
||||||
attr.Mtime = o.mtime
|
|
||||||
|
|
||||||
attr.Blocks = (attr.Size + 511) >> 9
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadDirAll shows all files in the current directory
|
|
||||||
func (o Object) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
|
|
||||||
dirs := []fuse.Dirent{}
|
|
||||||
switch o.objType {
|
|
||||||
case ROOT:
|
|
||||||
for _, directory := range o.fs.config.GetDirectories() {
|
|
||||||
dirs = append(dirs, fuse.Dirent{
|
|
||||||
Name: directory,
|
|
||||||
Type: fuse.DT_Dir,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
case DIRECTORY:
|
|
||||||
for el := o.fs.tMgr.TorrentMap.Front(); el != nil; el = el.Next() {
|
|
||||||
torrent := el.Value
|
|
||||||
if torrent.InProgress {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
dirs = append(dirs, fuse.Dirent{
|
|
||||||
Name: torrent.AccessKey,
|
|
||||||
Type: fuse.DT_Dir,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
case TORRENT:
|
|
||||||
torrent, _ := o.fs.tMgr.TorrentMap.Get(o.name)
|
|
||||||
if torrent == nil || torrent.InProgress {
|
|
||||||
return nil, syscall.ENOENT
|
|
||||||
}
|
|
||||||
for el := torrent.SelectedFiles.Front(); el != nil; el = el.Next() {
|
|
||||||
file := el.Value
|
|
||||||
if file.Link == "" {
|
|
||||||
// log.Println("File has no link, skipping", file.Path)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
filename := filepath.Base(file.Path)
|
|
||||||
dirs = append(dirs, fuse.Dirent{
|
|
||||||
Name: filename,
|
|
||||||
Type: fuse.DT_File,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return dirs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Lookup tests if a file is existent in the current directory
|
|
||||||
func (o Object) Lookup(ctx context.Context, name string) (fs.Node, error) {
|
|
||||||
switch o.objType {
|
|
||||||
case ROOT:
|
|
||||||
for _, directory := range o.fs.config.GetDirectories() {
|
|
||||||
if directory == name {
|
|
||||||
return Object{
|
|
||||||
fs: o.fs,
|
|
||||||
objType: DIRECTORY,
|
|
||||||
parentName: o.name,
|
|
||||||
name: name,
|
|
||||||
mtime: o.fs.initTime,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case DIRECTORY:
|
|
||||||
torrent, _ := o.fs.tMgr.TorrentMap.Get(name)
|
|
||||||
if torrent == nil {
|
|
||||||
return nil, syscall.ENOENT
|
|
||||||
}
|
|
||||||
return Object{
|
|
||||||
fs: o.fs,
|
|
||||||
objType: TORRENT,
|
|
||||||
parentName: o.name,
|
|
||||||
name: name,
|
|
||||||
mtime: convertRFC3339toTime(torrent.LatestAdded),
|
|
||||||
}, nil
|
|
||||||
|
|
||||||
case TORRENT:
|
|
||||||
torrent, _ := o.fs.tMgr.TorrentMap.Get(o.name)
|
|
||||||
if torrent == nil {
|
|
||||||
return nil, syscall.ENOENT
|
|
||||||
}
|
|
||||||
file, _ := torrent.SelectedFiles.Get(name)
|
|
||||||
if file == nil {
|
|
||||||
return nil, syscall.ENOENT
|
|
||||||
}
|
|
||||||
return Object{
|
|
||||||
fs: o.fs,
|
|
||||||
objType: FILE,
|
|
||||||
parentName: o.name,
|
|
||||||
name: name,
|
|
||||||
torrent: torrent,
|
|
||||||
file: file,
|
|
||||||
size: uint64(file.Bytes),
|
|
||||||
mtime: convertRFC3339toTime(file.Added),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
return nil, syscall.ENOENT
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open a file
|
|
||||||
func (o Object) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
|
|
||||||
resp.Flags |= fuse.OpenDirectIO
|
|
||||||
return o, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read reads some bytes or the whole file
|
|
||||||
func (o Object) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
|
|
||||||
reader := universal.GetFileReader(o.torrent, o.file, req.Offset, int(req.Size), o.fs.tMgr, o.fs.config, o.fs.log)
|
|
||||||
if reader == nil {
|
|
||||||
return syscall.EIO
|
|
||||||
}
|
|
||||||
defer reader.Close()
|
|
||||||
resp.Data = make([]byte, req.Size)
|
|
||||||
_, err := reader.Read(resp.Data)
|
|
||||||
if err != nil && err.Error() != "EOF" {
|
|
||||||
o.fs.log.Errorf("Error reading bytes from Real-Debrid: %v", err)
|
|
||||||
return syscall.EIO
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove deletes an element
|
|
||||||
func (o Object) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
|
|
||||||
return fmt.Errorf("Remove not yet implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rename renames an element
|
|
||||||
func (o Object) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fs.Node) error {
|
|
||||||
return fmt.Errorf("Rename not yet implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
func convertRFC3339toTime(input string) time.Time {
|
|
||||||
t, err := time.Parse(time.RFC3339, input)
|
|
||||||
if err != nil {
|
|
||||||
return time.Now()
|
|
||||||
}
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
185
internal/zfs/zfs.go
Normal file
185
internal/zfs/zfs.go
Normal file
@@ -0,0 +1,185 @@
|
|||||||
|
package zfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/debridmediamanager.com/zurg/internal/config"
|
||||||
|
"github.com/debridmediamanager.com/zurg/internal/torrent"
|
||||||
|
"github.com/debridmediamanager.com/zurg/pkg/chunk"
|
||||||
|
"github.com/winfsp/cgofuse/fuse"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ZurgFS struct {
|
||||||
|
fuse.FileSystemBase
|
||||||
|
TorrentManager *torrent.TorrentManager
|
||||||
|
Config config.ConfigInterface
|
||||||
|
Chunk *chunk.Manager
|
||||||
|
Log *zap.SugaredLogger
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewZurgFS(tm *torrent.TorrentManager, cfg config.ConfigInterface, chunk *chunk.Manager, log *zap.SugaredLogger) *ZurgFS {
|
||||||
|
return &ZurgFS{
|
||||||
|
TorrentManager: tm,
|
||||||
|
Config: cfg,
|
||||||
|
Chunk: chunk,
|
||||||
|
Log: log,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *ZurgFS) Open(path string, flags int) (errc int, fh uint64) {
|
||||||
|
segments := splitIntoSegments(path)
|
||||||
|
switch len(segments) {
|
||||||
|
case 0:
|
||||||
|
return 0, 0
|
||||||
|
case 1:
|
||||||
|
if _, dirFound := fs.TorrentManager.DirectoryMap.Get(segments[0]); !dirFound {
|
||||||
|
return -fuse.ENOENT, ^uint64(0)
|
||||||
|
}
|
||||||
|
return 0, 0
|
||||||
|
case 2:
|
||||||
|
if directory, dirFound := fs.TorrentManager.DirectoryMap.Get(segments[0]); !dirFound {
|
||||||
|
return -fuse.ENOENT, ^uint64(0)
|
||||||
|
} else if _, torFound := directory.Get(segments[1]); !torFound {
|
||||||
|
return -fuse.ENOENT, ^uint64(0)
|
||||||
|
}
|
||||||
|
return 0, 0
|
||||||
|
case 3:
|
||||||
|
if directory, dirFound := fs.TorrentManager.DirectoryMap.Get(segments[0]); !dirFound {
|
||||||
|
return -fuse.ENOENT, ^uint64(0)
|
||||||
|
} else if torrent, torFound := directory.Get(segments[1]); !torFound {
|
||||||
|
return -fuse.ENOENT, ^uint64(0)
|
||||||
|
} else if file, fileFound := torrent.SelectedFiles.Get(segments[2]); !fileFound {
|
||||||
|
return -fuse.ENOENT, ^uint64(0)
|
||||||
|
} else {
|
||||||
|
return 0, file.ZurgFS
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return -fuse.ENOENT, ^uint64(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *ZurgFS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int) {
|
||||||
|
segments := splitIntoSegments(path)
|
||||||
|
switch len(segments) {
|
||||||
|
case 0:
|
||||||
|
stat.Mode = fuse.S_IFDIR | 0555
|
||||||
|
return 0
|
||||||
|
case 1:
|
||||||
|
if _, dirFound := fs.TorrentManager.DirectoryMap.Get(segments[0]); !dirFound {
|
||||||
|
return -fuse.ENOENT
|
||||||
|
}
|
||||||
|
stat.Mode = fuse.S_IFDIR | 0555
|
||||||
|
return 0
|
||||||
|
case 2:
|
||||||
|
if directory, dirFound := fs.TorrentManager.DirectoryMap.Get(segments[0]); !dirFound {
|
||||||
|
return -fuse.ENOENT
|
||||||
|
} else if _, torFound := directory.Get(segments[1]); !torFound {
|
||||||
|
return -fuse.ENOENT
|
||||||
|
}
|
||||||
|
stat.Mode = fuse.S_IFDIR | 0555
|
||||||
|
return 0
|
||||||
|
case 3:
|
||||||
|
if directory, dirFound := fs.TorrentManager.DirectoryMap.Get(segments[0]); !dirFound {
|
||||||
|
return -fuse.ENOENT
|
||||||
|
} else if torrent, torFound := directory.Get(segments[1]); !torFound {
|
||||||
|
return -fuse.ENOENT
|
||||||
|
} else if file, fileFound := torrent.SelectedFiles.Get(segments[2]); !fileFound {
|
||||||
|
return -fuse.ENOENT
|
||||||
|
} else {
|
||||||
|
stat.Mode = fuse.S_IFREG | 0444
|
||||||
|
stat.Size = file.Bytes
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return -fuse.ENOENT
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *ZurgFS) Read(path string, buff []byte, ofst int64, fh uint64) (n int) {
|
||||||
|
segments := splitIntoSegments(path)
|
||||||
|
if len(segments) != 3 {
|
||||||
|
return -fuse.ENOENT
|
||||||
|
} else if directory, dirFound := fs.TorrentManager.DirectoryMap.Get(segments[0]); !dirFound {
|
||||||
|
return -fuse.ENOENT
|
||||||
|
} else if torrent, torFound := directory.Get(segments[1]); !torFound {
|
||||||
|
return -fuse.ENOENT
|
||||||
|
} else if file, fileFound := torrent.SelectedFiles.Get(segments[2]); !fileFound {
|
||||||
|
return -fuse.ENOENT
|
||||||
|
} else {
|
||||||
|
size := int64(len(buff))
|
||||||
|
if size < int64(fs.Config.GetNetworkBufferSize()) {
|
||||||
|
size = int64(fs.Config.GetNetworkBufferSize())
|
||||||
|
}
|
||||||
|
endofst := ofst + size
|
||||||
|
if endofst > file.Bytes {
|
||||||
|
endofst = file.Bytes
|
||||||
|
}
|
||||||
|
if endofst < ofst {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
response, err := fs.Chunk.GetChunk(file, ofst, size)
|
||||||
|
if err != nil {
|
||||||
|
return -fuse.ENOENT
|
||||||
|
}
|
||||||
|
// response := universal.GetFileReader(torrent, file, ofst, int(size), fs.TorrentManager, fs.Config, fs.Log)
|
||||||
|
// if response == nil {
|
||||||
|
// return -fuse.ENOENT
|
||||||
|
// }
|
||||||
|
n = copy(buff, response[:endofst-ofst])
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *ZurgFS) Readdir(path string,
|
||||||
|
fill func(name string, stat *fuse.Stat_t, ofst int64) bool,
|
||||||
|
ofst int64,
|
||||||
|
fh uint64) (errc int) {
|
||||||
|
|
||||||
|
segments := splitIntoSegments(path)
|
||||||
|
switch len(segments) {
|
||||||
|
case 0:
|
||||||
|
fill(".", nil, 0)
|
||||||
|
fill("..", nil, 0)
|
||||||
|
for el := fs.TorrentManager.DirectoryMap.Front(); el != nil; el = el.Next() {
|
||||||
|
fill(el.Key, nil, 0)
|
||||||
|
}
|
||||||
|
case 1:
|
||||||
|
fill(".", nil, 0)
|
||||||
|
fill("..", nil, 0)
|
||||||
|
if directory, dirFound := fs.TorrentManager.DirectoryMap.Get(segments[0]); !dirFound {
|
||||||
|
return -fuse.ENOENT
|
||||||
|
} else {
|
||||||
|
for el := directory.Front(); el != nil; el = el.Next() {
|
||||||
|
fill(el.Key, nil, 0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 2:
|
||||||
|
fill(".", nil, 0)
|
||||||
|
fill("..", nil, 0)
|
||||||
|
if directory, dirFound := fs.TorrentManager.DirectoryMap.Get(segments[0]); !dirFound {
|
||||||
|
return -fuse.ENOENT
|
||||||
|
} else if torrent, torFound := directory.Get(segments[1]); !torFound {
|
||||||
|
return -fuse.ENOENT
|
||||||
|
} else {
|
||||||
|
for el := torrent.SelectedFiles.Front(); el != nil; el = el.Next() {
|
||||||
|
fill(el.Key, nil, 0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return -fuse.ENOENT
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func splitIntoSegments(path string) []string {
|
||||||
|
segments := strings.Split(path, "/")
|
||||||
|
// remove empty segments
|
||||||
|
for i := 0; i < len(segments); i++ {
|
||||||
|
if segments[i] == "" {
|
||||||
|
segments = append(segments[:i], segments[i+1:]...)
|
||||||
|
i--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return segments
|
||||||
|
}
|
||||||
10
package.json
Normal file
10
package.json
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
{
|
||||||
|
"name": "zurg",
|
||||||
|
"version": "1.0.0",
|
||||||
|
"description": "webdav server containing an infinite library of movies and tv shows",
|
||||||
|
"main": "index.js",
|
||||||
|
"repository": "git@github.com:debridmediamanager/zurg.git",
|
||||||
|
"author": "Ben Adrian Sarmiento <me@bensarmiento.com>",
|
||||||
|
"license": "MIT",
|
||||||
|
"private": false
|
||||||
|
}
|
||||||
51
pkg/chunk/chunk.go
Normal file
51
pkg/chunk/chunk.go
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
package chunk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"container/list"
|
||||||
|
"hash/crc32"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Chunk of memory
|
||||||
|
type Chunk struct {
|
||||||
|
clean bool
|
||||||
|
*chunkHeader
|
||||||
|
bytes []byte
|
||||||
|
item *list.Element
|
||||||
|
}
|
||||||
|
|
||||||
|
type chunkHeader struct {
|
||||||
|
id RequestID
|
||||||
|
size uint32
|
||||||
|
checksum uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Chunk) valid(id RequestID) bool {
|
||||||
|
if c.id != id {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if !c.clean {
|
||||||
|
c.clean = c.checksum == c.calculateChecksum()
|
||||||
|
}
|
||||||
|
return c.clean
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Chunk) update(id RequestID, bytes []byte) {
|
||||||
|
c.id = id
|
||||||
|
c.size = uint32(copy(c.bytes, bytes))
|
||||||
|
c.checksum = c.calculateChecksum()
|
||||||
|
c.clean = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Chunk) calculateChecksum() uint32 {
|
||||||
|
size := c.size
|
||||||
|
if nil == c.bytes || size == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
maxSize := uint32(len(c.bytes))
|
||||||
|
if size > maxSize {
|
||||||
|
// corrupt size or truncated chunk, fix size
|
||||||
|
c.size = maxSize
|
||||||
|
return crc32.Checksum(c.bytes, crc32Table)
|
||||||
|
}
|
||||||
|
return crc32.Checksum(c.bytes[:size], crc32Table)
|
||||||
|
}
|
||||||
146
pkg/chunk/download.go
Normal file
146
pkg/chunk/download.go
Normal file
@@ -0,0 +1,146 @@
|
|||||||
|
package chunk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/debridmediamanager.com/zurg/internal/config"
|
||||||
|
"github.com/debridmediamanager.com/zurg/internal/torrent"
|
||||||
|
"github.com/debridmediamanager.com/zurg/pkg/logutil"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Downloader handles concurrent chunk downloads
|
||||||
|
type Downloader struct {
|
||||||
|
BufferSize int64
|
||||||
|
queue chan *Request
|
||||||
|
callbacks map[RequestID][]DownloadCallback
|
||||||
|
lock sync.Mutex
|
||||||
|
storage *Storage
|
||||||
|
c config.ConfigInterface
|
||||||
|
t *torrent.TorrentManager
|
||||||
|
log *zap.SugaredLogger
|
||||||
|
}
|
||||||
|
|
||||||
|
type DownloadCallback func(error, []byte)
|
||||||
|
|
||||||
|
// NewDownloader creates a new download manager
|
||||||
|
func NewDownloader(threads int, storage *Storage, bufferSize int64, t *torrent.TorrentManager, c config.ConfigInterface) (*Downloader, error) {
|
||||||
|
rlog := logutil.NewLogger()
|
||||||
|
log := rlog.Named("downloader")
|
||||||
|
|
||||||
|
manager := Downloader{
|
||||||
|
BufferSize: bufferSize,
|
||||||
|
queue: make(chan *Request, 100),
|
||||||
|
callbacks: make(map[RequestID][]DownloadCallback, 100),
|
||||||
|
storage: storage,
|
||||||
|
c: c,
|
||||||
|
t: t,
|
||||||
|
log: log,
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < threads; i++ {
|
||||||
|
go manager.thread(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &manager, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Download starts a new download request
|
||||||
|
func (d *Downloader) Download(req *Request, callback DownloadCallback) {
|
||||||
|
d.lock.Lock()
|
||||||
|
callbacks, exists := d.callbacks[req.id]
|
||||||
|
if nil != callback {
|
||||||
|
d.callbacks[req.id] = append(callbacks, callback)
|
||||||
|
} else if !exists {
|
||||||
|
d.callbacks[req.id] = callbacks
|
||||||
|
}
|
||||||
|
if !exists {
|
||||||
|
d.queue <- req
|
||||||
|
}
|
||||||
|
d.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Downloader) thread(n int) {
|
||||||
|
buffer, err := unix.Mmap(-1, 0, int(d.BufferSize), syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE)
|
||||||
|
if nil != err {
|
||||||
|
d.log.Warnf("Failed to mmap download buffer %v: %v", n, err)
|
||||||
|
buffer = make([]byte, d.BufferSize)
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
req := <-d.queue
|
||||||
|
d.download(req, buffer)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Downloader) download(req *Request, buffer []byte) {
|
||||||
|
d.log.Debugf("Starting download %v (preload: %v)", req.id, req.preload)
|
||||||
|
err := d.downloadFromAPI(req, buffer, 0)
|
||||||
|
|
||||||
|
d.lock.Lock()
|
||||||
|
callbacks := d.callbacks[req.id]
|
||||||
|
for _, callback := range callbacks {
|
||||||
|
callback(err, buffer)
|
||||||
|
}
|
||||||
|
delete(d.callbacks, req.id)
|
||||||
|
d.lock.Unlock()
|
||||||
|
|
||||||
|
if nil != err {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := d.storage.Store(req.id, buffer); nil != err {
|
||||||
|
d.log.Warnf("Could not store chunk %v: %v", req.id, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Downloader) downloadFromAPI(request *Request, buffer []byte, delay int64) error {
|
||||||
|
// sleep if request is throttled
|
||||||
|
if delay > 0 {
|
||||||
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp := d.t.UnrestrictUntilOk(request.file.Link)
|
||||||
|
if resp == nil {
|
||||||
|
return fmt.Errorf("cannot unrestrict file %s %s", request.file.Path, request.file.Link)
|
||||||
|
}
|
||||||
|
|
||||||
|
downloadURL := resp.Download
|
||||||
|
req, err := http.NewRequest("GET", downloadURL, nil)
|
||||||
|
if nil != err {
|
||||||
|
d.log.Debugf("request init error: %v", err)
|
||||||
|
return fmt.Errorf("could not create request object %s %s from API", request.file.Path, request.file.Link)
|
||||||
|
}
|
||||||
|
req.Header.Add("Range", fmt.Sprintf("bytes=%v-%v", request.offsetStart, request.offsetEnd-1))
|
||||||
|
|
||||||
|
res, err := http.DefaultClient.Do(req)
|
||||||
|
if nil != err {
|
||||||
|
d.log.Debugf("request error: %v", err)
|
||||||
|
return fmt.Errorf("could not request object %s %s from API", request.file.Path, request.file.Link)
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
reader := res.Body
|
||||||
|
|
||||||
|
if res.StatusCode != 206 && res.StatusCode != 200 {
|
||||||
|
return fmt.Errorf("could not read object %s %s / StatusCode: %v",
|
||||||
|
request.file.Path, request.file.Link, res.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
if res.ContentLength == -1 {
|
||||||
|
return fmt.Errorf("missing Content-Length header in response")
|
||||||
|
}
|
||||||
|
|
||||||
|
n, err := io.ReadFull(reader, buffer[:res.ContentLength:cap(buffer)])
|
||||||
|
if nil != err && err != io.ErrUnexpectedEOF {
|
||||||
|
d.log.Debugf("response read error: %v", err)
|
||||||
|
return fmt.Errorf("could not read objects %s %s API response", request.file.Path, request.file.Link)
|
||||||
|
}
|
||||||
|
d.log.Debugf("Downloaded %v bytes of %s %s", n, request.file.Path, request.file.Link)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
266
pkg/chunk/manager.go
Normal file
266
pkg/chunk/manager.go
Normal file
@@ -0,0 +1,266 @@
|
|||||||
|
package chunk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/debridmediamanager.com/zurg/internal/config"
|
||||||
|
"github.com/debridmediamanager.com/zurg/internal/torrent"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Manager manages chunks on disk
type Manager struct {
	ChunkSize  int64            // size of a single chunk in bytes
	LoadAhead  int              // number of chunks to preload after a requested chunk
	downloader *Downloader      // fetches chunks that are not in storage
	storage    *Storage         // mmap-backed chunk cache
	queue      chan *QueueEntry // pending chunk requests, consumed by worker threads
}

// QueueEntry pairs a chunk request with the channel its response is
// delivered on; a nil response channel marks a preload-only request.
type QueueEntry struct {
	request  *Request
	response chan Response
}

// RequestID is the binary identifier for a chunk request:
// 16 bytes of a file-identity hash followed by the 8-byte big-endian
// chunk offset (see buildRequestID).
type RequestID [24]byte

// String renders the id as "<32 hex digits>:<offset>".
func (id RequestID) String() string {
	return fmt.Sprintf("%032x:%v", id[:16], binary.BigEndian.Uint64(id[16:]))
}

// Request represents a chunk request
type Request struct {
	id             RequestID     // cache key for this chunk
	file           *torrent.File // file the chunk belongs to
	offsetStart    int64         // chunk-aligned start offset within the file
	offsetEnd      int64         // offsetStart + chunk size
	chunkOffset    int64         // start of the requested window inside the chunk
	chunkOffsetEnd int64         // end of the requested window inside the chunk
	sequence       int           // position of this range within the caller's read
	preload        bool          // true for read-ahead requests with no waiting caller
}

// Response represents a chunk response
type Response struct {
	Sequence int    // echoes Request.sequence so the caller can reassemble order
	Error    error  // non-nil if the chunk could not be fetched
	Bytes    []byte // requested window of the chunk data
}
|
||||||
|
|
||||||
|
// NewManager creates a new chunk manager
|
||||||
|
func NewManager(
|
||||||
|
chunkFile string,
|
||||||
|
chunkSize int64,
|
||||||
|
loadAhead,
|
||||||
|
checkThreads,
|
||||||
|
loadThreads,
|
||||||
|
maxChunks int,
|
||||||
|
t *torrent.TorrentManager,
|
||||||
|
c config.ConfigInterface) (*Manager, error) {
|
||||||
|
|
||||||
|
pageSize := int64(os.Getpagesize())
|
||||||
|
if chunkSize < pageSize {
|
||||||
|
return nil, fmt.Errorf("chunk size must not be < %v", pageSize)
|
||||||
|
}
|
||||||
|
if chunkSize%pageSize != 0 {
|
||||||
|
return nil, fmt.Errorf("chunk size must be divideable by %v", pageSize)
|
||||||
|
}
|
||||||
|
// 32-Bit: ~2GB / 64-Bit: ~8EB
|
||||||
|
maxMmapSize := int64(^uint(0) >> 1)
|
||||||
|
if chunkSize > maxMmapSize {
|
||||||
|
return nil, fmt.Errorf("chunk size must be < %v", maxMmapSize)
|
||||||
|
}
|
||||||
|
if maxChunks < 2 || maxChunks < loadAhead {
|
||||||
|
return nil, fmt.Errorf("max-chunks must be greater than 2 and bigger than the load ahead value")
|
||||||
|
}
|
||||||
|
|
||||||
|
storage, err := NewStorage(chunkSize, maxChunks, maxMmapSize, chunkFile)
|
||||||
|
if nil != err {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
downloader, err := NewDownloader(loadThreads, storage, chunkSize, t, c)
|
||||||
|
if nil != err {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
manager := Manager{
|
||||||
|
ChunkSize: chunkSize,
|
||||||
|
LoadAhead: loadAhead,
|
||||||
|
downloader: downloader,
|
||||||
|
storage: storage,
|
||||||
|
queue: make(chan *QueueEntry, 100),
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := manager.storage.Clear(); nil != err {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < checkThreads; i++ {
|
||||||
|
go manager.thread()
|
||||||
|
}
|
||||||
|
|
||||||
|
return &manager, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetChunk loads one chunk and starts the preload for the next chunks
//
// The read is clamped to the end of the file, split into chunk-aligned
// sub-ranges, and the sub-range responses are reassembled into a single
// buffer using their sequence numbers.
func (m *Manager) GetChunk(file *torrent.File, offset, size int64) ([]byte, error) {
	maxOffset := file.Bytes
	if offset > maxOffset {
		return nil, fmt.Errorf("tried to read past EOF of %v at offset %v", file.ID, offset)
	}
	// Log.Infof("Request %v:%v md5:%v", object.ID, offset, object.MD5Checksum)
	// Clamp the read so it ends at EOF.
	if offset+size > maxOffset {
		size = file.Bytes - offset
	}

	ranges := splitChunkRanges(offset, size, m.ChunkSize)
	numRanges := len(ranges)
	responses := make(chan Response, numRanges)

	// Only the last sub-range triggers read-ahead of the following chunks.
	last := numRanges - 1
	for i, r := range ranges {
		m.requestChunk(file, r.offset, r.size, i, i == last, responses)
	}

	// Collect all responses (they may arrive out of order) and copy each
	// one into its place in the output buffer.
	data := make([]byte, size)
	for i := 0; i < cap(responses); i++ {
		res := <-responses
		if nil != res.Error {
			return nil, res.Error
		}

		dataOffset := ranges[res.Sequence].offset - offset

		if n := copy(data[dataOffset:], res.Bytes); n == 0 {
			return nil, fmt.Errorf("request %v slice %v has empty response", file.ID, res.Sequence)
		}
	}
	close(responses)

	return data, nil
}
|
||||||
|
|
||||||
|
func buildRequestID(object *torrent.File, offset int64) (id RequestID) {
|
||||||
|
fileID := object.Link
|
||||||
|
if fileID == "" {
|
||||||
|
fileID = object.Path
|
||||||
|
}
|
||||||
|
hash := sha256.Sum256([]byte(fileID))
|
||||||
|
copy(id[:16], hash[:16])
|
||||||
|
binary.BigEndian.PutUint64(id[16:], uint64(offset))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// requestChunk enqueues the chunk covering offset and, when preload is
// set, additionally enqueues up to LoadAhead read-ahead chunks that
// carry no response channel.
func (m *Manager) requestChunk(object *torrent.File, offset, size int64, sequence int, preload bool, response chan Response) {
	// Align the request to the chunk grid; chunkOffset is the window
	// start inside the chunk.
	chunkOffset := offset % m.ChunkSize
	offsetStart := offset - chunkOffset
	offsetEnd := offsetStart + m.ChunkSize

	request := &Request{
		id:             buildRequestID(object, offsetStart),
		file:           object,
		offsetStart:    offsetStart,
		offsetEnd:      offsetEnd,
		chunkOffset:    chunkOffset,
		chunkOffsetEnd: chunkOffset + size,
		sequence:       sequence,
		// The caller-visible request is never a preload: it carries a
		// response channel and someone is waiting for the bytes.
		preload: false,
	}

	m.queue <- &QueueEntry{
		request:  request,
		response: response,
	}

	if !preload {
		return
	}

	// Queue the next LoadAhead chunks without a response channel, so
	// workers only warm the cache for them.
	for i := m.ChunkSize; i < (m.ChunkSize * int64(m.LoadAhead+1)); i += m.ChunkSize {
		aheadOffsetStart := offsetStart + i
		aheadOffsetEnd := aheadOffsetStart + m.ChunkSize
		// Only full in-bounds chunks are preloaded; the trailing partial
		// chunk of a file is skipped.
		if uint64(aheadOffsetStart) < uint64(object.Bytes) && uint64(aheadOffsetEnd) < uint64(object.Bytes) {
			request := &Request{
				id:          buildRequestID(object, aheadOffsetStart),
				file:        object,
				offsetStart: aheadOffsetStart,
				offsetEnd:   aheadOffsetEnd,
				preload:     true,
			}
			m.queue <- &QueueEntry{
				request: request,
			}
		}
	}
}
|
||||||
|
|
||||||
|
// byteRange describes a contiguous span of a file.
type byteRange struct {
	offset, size int64
}

// Calculate request ranges that span multiple chunks
//
// This can happen with Direct-IO and unaligned reads or
// if the size is bigger than the chunk size. Each returned range stays
// within a single chunk; consecutive ranges are contiguous.
func splitChunkRanges(offset, size, chunkSize int64) []byteRange {
	ranges := make([]byteRange, 0, size/chunkSize+2)

	remaining := size
	for remaining > 0 {
		// Largest step that does not cross the next chunk boundary.
		step := chunkSize - offset%chunkSize
		if step > remaining {
			step = remaining
		}
		ranges = append(ranges, byteRange{offset, step})
		offset += step
		remaining -= step
	}
	return ranges
}
|
||||||
|
|
||||||
|
// thread is a worker loop that serves queued chunk requests.
// It runs for the lifetime of the process; no shutdown signal is wired
// up here.
func (m *Manager) thread() {
	for {
		queueEntry := <-m.queue
		m.checkChunk(queueEntry.request, queueEntry.response)
	}
}
|
||||||
|
|
||||||
|
// checkChunk serves one queued request, from storage when the chunk is
// cached, otherwise via the downloader.
//
// A nil response channel marks a preload request: the chunk is only
// downloaded into storage when missing and nobody waits for the bytes.
func (m *Manager) checkChunk(req *Request, response chan Response) {
	if nil == response {
		if nil == m.storage.Load(req.id) {
			m.downloader.Download(req, nil)
		}
		return
	}

	// Cache hit: answer immediately with the requested window.
	if bytes := m.storage.Load(req.id); nil != bytes {
		response <- Response{
			Sequence: req.sequence,
			Bytes:    adjustResponseChunk(req, bytes),
		}
		return
	}

	// Cache miss: the downloader invokes the callback once the chunk
	// (or an error) is available.
	m.downloader.Download(req, func(err error, bytes []byte) {
		response <- Response{
			Sequence: req.sequence,
			Error:    err,
			Bytes:    adjustResponseChunk(req, bytes),
		}
	})
}
|
||||||
|
|
||||||
|
func adjustResponseChunk(req *Request, bytes []byte) []byte {
|
||||||
|
if nil == bytes {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
bytesLen := int64(len(bytes))
|
||||||
|
sOffset := min(req.chunkOffset, bytesLen)
|
||||||
|
eOffset := min(req.chunkOffsetEnd, bytesLen)
|
||||||
|
return bytes[sOffset:eOffset]
|
||||||
|
}
|
||||||
|
|
||||||
|
// min returns the smaller of two int64 values.
func min(x, y int64) int64 {
	if y < x {
		return y
	}
	return x
}
|
||||||
53
pkg/chunk/manager_test.go
Normal file
53
pkg/chunk/manager_test.go
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
package chunk
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestSplitChunkRanges(t *testing.T) {
|
||||||
|
testcases := []struct {
|
||||||
|
offset, size, chunkSize int64
|
||||||
|
result []byteRange
|
||||||
|
}{
|
||||||
|
{0, 0, 4096, []byteRange{}},
|
||||||
|
{0, 4096, 4096, []byteRange{
|
||||||
|
{0, 4096},
|
||||||
|
}},
|
||||||
|
{4095, 4096, 4096, []byteRange{
|
||||||
|
{4095, 1},
|
||||||
|
{4096, 4095},
|
||||||
|
}},
|
||||||
|
{0, 8192, 4096, []byteRange{
|
||||||
|
{0, 4096},
|
||||||
|
{4096, 4096},
|
||||||
|
}},
|
||||||
|
{2048, 8192, 4096, []byteRange{
|
||||||
|
{2048, 2048},
|
||||||
|
{4096, 4096},
|
||||||
|
{8192, 2048},
|
||||||
|
}},
|
||||||
|
{2048, 8192, 4096, []byteRange{
|
||||||
|
{2048, 2048},
|
||||||
|
{4096, 4096},
|
||||||
|
{8192, 2048},
|
||||||
|
}},
|
||||||
|
{17960960, 16777216, 10485760, []byteRange{
|
||||||
|
{17960960, 3010560},
|
||||||
|
{20971520, 10485760},
|
||||||
|
{31457280, 3280896},
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
for i, tc := range testcases {
|
||||||
|
ranges := splitChunkRanges(tc.offset, tc.size, tc.chunkSize)
|
||||||
|
actualSize := len(ranges)
|
||||||
|
expectedSize := len(tc.result)
|
||||||
|
if actualSize != expectedSize {
|
||||||
|
t.Fatalf("ByteRange %v length mismatch: %v != %v", i, actualSize, expectedSize)
|
||||||
|
}
|
||||||
|
for j, r := range ranges {
|
||||||
|
actual := r
|
||||||
|
expected := tc.result[j]
|
||||||
|
if actual != expected {
|
||||||
|
t.Fatalf("ByteRange %v mismatch: %v != %v", i, actual, expected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
75
pkg/chunk/stack.go
Normal file
75
pkg/chunk/stack.go
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
package chunk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"container/list"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Stack is a thread safe list/stack implementation
//
// It keeps chunk buffer indices in least-recently-used order: the front
// of the list holds the next index to evict, the back the most recently
// used one.
type Stack struct {
	items   *list.List
	lock    sync.Mutex
	maxSize int
}

// NewStack creates a new stack that only starts yielding items from Pop
// once it holds maxChunks entries.
func NewStack(maxChunks int) *Stack {
	return &Stack{items: list.New(), maxSize: maxChunks}
}

// Len gets the number of items on the stack.
func (s *Stack) Len() int {
	s.lock.Lock()
	n := s.items.Len()
	s.lock.Unlock()
	return n
}

// Pop removes and returns the least recently used item. It returns -1
// while the stack holds fewer than maxSize items, or when it is empty.
func (s *Stack) Pop() int {
	s.lock.Lock()
	defer s.lock.Unlock()

	if s.items.Len() < s.maxSize {
		return -1
	}
	front := s.items.Front()
	if front == nil {
		return -1
	}
	s.items.Remove(front)
	return front.Value.(int)
}

// Touch marks item as most recently used by moving it to the back of
// the stack.
func (s *Stack) Touch(item *list.Element) {
	s.lock.Lock()
	defer s.lock.Unlock()

	if s.items.Back() != item {
		s.items.MoveToBack(item)
	}
}

// Push appends a new item at the back (most recently used position) and
// returns its list element for later Touch/Purge calls.
func (s *Stack) Push(id int) *list.Element {
	s.lock.Lock()
	defer s.lock.Unlock()

	return s.items.PushBack(id)
}

// Prepend adds a whole list at the front of the stack, making its items
// the first eviction candidates.
func (s *Stack) Prepend(items *list.List) {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.items.PushFrontList(items)
}

// Purge marks item for earliest reuse by moving it to the front of the
// stack. Note that the item is NOT removed.
func (s *Stack) Purge(item *list.Element) {
	s.lock.Lock()
	defer s.lock.Unlock()

	if s.items.Front() != item {
		s.items.MoveToFront(item)
	}
}
|
||||||
72
pkg/chunk/stack_test.go
Normal file
72
pkg/chunk/stack_test.go
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
package chunk
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
// TestOOB verifies that Touch on the only (already-back) element is a
// no-op and does not panic.
func TestOOB(t *testing.T) {
	stack := NewStack(1)

	item := stack.Push(1)
	stack.Touch(item)
}
|
||||||
|
|
||||||
|
// TestAddToStack verifies the LRU ordering: Touch moves an item to the
// back (last to be popped), Purge moves it to the front (first to be
// popped), and Pop returns -1 once the stack is exhausted.
func TestAddToStack(t *testing.T) {
	stack := NewStack(1)

	item1 := stack.Push(1)
	item2 := stack.Push(2)
	item3 := stack.Push(3)
	item4 := stack.Push(4)

	stack.Touch(item1)
	stack.Touch(item3)

	stack.Purge(item2)
	stack.Purge(item4)

	// List order is now [4 2 1 3]: touches moved 1 then 3 to the back,
	// purging 4 moved it to the front (2 was already at the front).
	v := stack.Pop()
	if v != 4 {
		t.Fatalf("Expected 4 got %v", v)
	}

	v = stack.Pop()
	if v != 2 {
		t.Fatalf("Expected 2 got %v", v)
	}

	v = stack.Pop()
	if v != 1 {
		t.Fatalf("Expected 1 got %v", v)
	}

	v = stack.Pop()
	if v != 3 {
		t.Fatalf("Expected 3 got %v", v)
	}

	// Stack is empty now.
	v = stack.Pop()
	if v != -1 {
		t.Fatalf("Expected -1 got %v", v)
	}
}
|
||||||
|
|
||||||
|
// TestLen verifies that Len tracks pushes and pops.
func TestLen(t *testing.T) {
	stack := NewStack(1)

	v := stack.Len()
	if v != 0 {
		t.Fatalf("Expected 0 got %v", v)
	}

	stack.Push(1)
	v = stack.Len()
	if v != 1 {
		t.Fatalf("Expected 1 got %v", v)
	}

	_ = stack.Pop()
	v = stack.Len()
	if v != 0 {
		t.Fatalf("Expected 0 got %v", v)
	}

}
|
||||||
420
pkg/chunk/storage.go
Normal file
420
pkg/chunk/storage.go
Normal file
@@ -0,0 +1,420 @@
|
|||||||
|
package chunk
|
||||||
|
|
||||||
|
import (
	"container/list"
	"fmt"
	"hash/crc32"
	"io"
	"os"
	"os/signal"
	"sync"
	"syscall"
	"time"
	"unsafe"

	"go.uber.org/zap"
	"golang.org/x/sys/unix"

	"github.com/debridmediamanager.com/zurg/pkg/logutil"
)
|
||||||
|
|
||||||
|
const (
	// headerSize is the on-disk size of one per-chunk header in the journal.
	headerSize = int(unsafe.Sizeof(*new(chunkHeader)))
	// tocSize is the size of the journal's trailing table-of-contents header.
	tocSize = int64(unsafe.Sizeof(*new(journalHeader)))
	// journalMagic is 'PD'; an endianness mismatch reads back as 'DP'
	// and fails validation (see checkJournal).
	journalMagic = uint16('P'<<8 | 'D'&0xFF)
	// journalVersion is the current journal format version.
	journalVersion = uint8(2)
)

var (
	// blankRequestID is the zero value, used to detect unused chunk slots.
	blankRequestID RequestID
	// crc32Table is the Castagnoli table used for journal checksums.
	crc32Table = crc32.MakeTable(crc32.Castagnoli)
)
|
||||||
|
|
||||||
|
// Storage is a chunk storage
type Storage struct {
	ChunkFile  *os.File // backing file for mmap; nil means anonymous memory only
	ChunkSize  int64    // size of one chunk in bytes
	HeaderSize int64    // NOTE(review): appears unused within this file — confirm
	MaxChunks  int      // total number of chunk slots
	chunks     map[RequestID]int // chunk id -> buffer index
	stack      *Stack   // reuse order of buffer indices (front = next to evict)
	lock       sync.RWMutex // guards chunks, buffers content and checksum checks
	buffers    []*Chunk // one mmap-backed buffer per slot
	loadChunks int      // number of slots restorable from an existing cache file
	signals    chan os.Signal // SIGINT/SIGTERM, used to abort the chunk loader
	journal    []byte   // mmap'd per-chunk header area (without the trailing TOC)
	mmapRegions [][]byte // large mappings that chunk buffers are sliced from
	chunksPerRegion int64 // how many chunks fit into one mmap region
	log        *zap.SugaredLogger
}

// journalHeader is the journal's table-of-contents record, stored at the
// very end of the cache file and validated by checkJournal.
type journalHeader struct {
	magic      uint16 // journalMagic ('PD')
	version    uint8  // journalVersion at write time
	headerSize uint8  // sizeof(chunkHeader) at write time
	maxChunks  uint32 // chunk slot count at write time
	chunkSize  uint32 // chunk size at write time
	checksum   uint32 // CRC32-C over the 12 preceding bytes
}
|
||||||
|
|
||||||
|
// NewStorage creates a new storage
|
||||||
|
func NewStorage(chunkSize int64, maxChunks int, maxMmapSize int64, chunkFilePath string) (*Storage, error) {
|
||||||
|
rlog := logutil.NewLogger()
|
||||||
|
log := rlog.Named("storage")
|
||||||
|
|
||||||
|
s := Storage{
|
||||||
|
ChunkSize: chunkSize,
|
||||||
|
MaxChunks: maxChunks,
|
||||||
|
chunks: make(map[RequestID]int, maxChunks),
|
||||||
|
stack: NewStack(maxChunks),
|
||||||
|
buffers: make([]*Chunk, maxChunks),
|
||||||
|
signals: make(chan os.Signal, 1),
|
||||||
|
log: log,
|
||||||
|
}
|
||||||
|
|
||||||
|
journalSize := tocSize + int64(headerSize*maxChunks)
|
||||||
|
journalOffset := chunkSize * int64(maxChunks)
|
||||||
|
|
||||||
|
// Non-empty string in chunkFilePath enables MMAP disk storage for chunks
|
||||||
|
if chunkFilePath != "" {
|
||||||
|
chunkFile, err := os.OpenFile(chunkFilePath, os.O_RDWR|os.O_CREATE, 0600)
|
||||||
|
if nil != err {
|
||||||
|
s.log.Debugf("%v", err)
|
||||||
|
return nil, fmt.Errorf("could not open chunk cache file")
|
||||||
|
}
|
||||||
|
s.ChunkFile = chunkFile
|
||||||
|
currentSize, err := chunkFile.Seek(0, os.SEEK_END)
|
||||||
|
if nil != err {
|
||||||
|
s.log.Debugf("%v", err)
|
||||||
|
return nil, fmt.Errorf("chunk file is not seekable")
|
||||||
|
}
|
||||||
|
wantedSize := journalOffset + journalSize
|
||||||
|
s.log.Debugf("Current chunk cache file size: %v B (wanted: %v B)", currentSize, wantedSize)
|
||||||
|
if err := chunkFile.Truncate(currentSize); nil != err {
|
||||||
|
s.log.Warnf("Could not truncate chunk cache, skip resizing")
|
||||||
|
} else if currentSize != wantedSize {
|
||||||
|
if currentSize > tocSize {
|
||||||
|
err = s.relocateJournal(currentSize, wantedSize, journalSize, journalOffset)
|
||||||
|
if nil != err {
|
||||||
|
s.log.Errorf("%v", err)
|
||||||
|
} else {
|
||||||
|
s.log.Infof("Relocated chunk cache journal")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := chunkFile.Truncate(wantedSize); nil != err {
|
||||||
|
s.log.Debugf("%v", err)
|
||||||
|
return nil, fmt.Errorf("could not resize chunk cache file")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.log.Infof("Created chunk cache file %v", chunkFile.Name())
|
||||||
|
s.loadChunks = int(min(currentSize/chunkSize, int64(maxChunks)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Alocate journal
|
||||||
|
if journal, err := s.mmap(journalOffset, journalSize); nil != err {
|
||||||
|
return nil, fmt.Errorf("could not allocate journal: %v", err)
|
||||||
|
} else {
|
||||||
|
if err := unix.Madvise(journal, syscall.MADV_RANDOM); nil != err {
|
||||||
|
s.log.Warnf("Madvise MADV_RANDOM for journal failed: %v", err)
|
||||||
|
}
|
||||||
|
tocOffset := journalSize - tocSize
|
||||||
|
header := journal[tocOffset:]
|
||||||
|
if valid := s.checkJournal(header, false); !valid {
|
||||||
|
s.initJournal(header)
|
||||||
|
}
|
||||||
|
s.journal = journal[:tocOffset]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Setup sighandler
|
||||||
|
signal.Notify(s.signals, syscall.SIGINT, syscall.SIGTERM)
|
||||||
|
|
||||||
|
// Allocate mmap regions for chunks
|
||||||
|
if err := s.allocateMmapRegions(maxMmapSize); nil != err {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// Map chunks to slices from mmap regions
|
||||||
|
if err := s.mmapChunks(); nil != err {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// relocateJournal moves existing journal prior to resize
//
// It reads the TOC from the end of the current file, validates it
// (ignoring maxChunks, which is what changed), copies as much of the
// old journal as fits, then rewrites journal and TOC at their new
// positions in the resized file.
func (s *Storage) relocateJournal(currentSize, wantedSize, journalSize, journalOffset int64) error {
	header := make([]byte, tocSize)
	if _, err := s.ChunkFile.ReadAt(header, currentSize-tocSize); nil != err {
		return fmt.Errorf("failed to read journal header: %v", err)
	}

	// skipMaxChunks=true: the chunk count is expected to differ.
	if valid := s.checkJournal(header, true); !valid {
		return fmt.Errorf("failed to validate journal header")
	}

	h := (*journalHeader)(unsafe.Pointer(&header[0]))
	// The old journal starts right after the old chunk data area.
	oldJournalOffset := s.ChunkSize * int64(h.maxChunks)
	oldJournalSize := min(journalSize, currentSize-oldJournalOffset) - tocSize
	journal := make([]byte, journalSize)

	if _, err := s.ChunkFile.ReadAt(journal[:oldJournalSize], oldJournalOffset); nil != err {
		return fmt.Errorf("failed to read journal: %v", err)
	}

	// Rewrite the TOC for the new geometry (maxChunks/chunkSize/checksum).
	s.initJournal(header)

	// Drop the old journal from the file, then grow/shrink to the new size.
	sizeWithoutJournal := currentSize - oldJournalSize - tocSize
	if err := s.ChunkFile.Truncate(sizeWithoutJournal); nil != err {
		return fmt.Errorf("could not truncate chunk cache journal: %v", err)
	}

	if err := s.ChunkFile.Truncate(wantedSize); nil != err {
		return fmt.Errorf("could not resize chunk cache file: %v", err)
	}

	if _, err := s.ChunkFile.WriteAt(journal, journalOffset); nil != err {
		return fmt.Errorf("failed to write journal: %v", err)
	}
	if _, err := s.ChunkFile.WriteAt(header, wantedSize-tocSize); nil != err {
		return fmt.Errorf("failed to write journal header: %v", err)
	}
	return nil
}
|
||||||
|
|
||||||
|
// checkJournal verifies the journal header
//
// journal must start with a journalHeader. skipMaxChunks allows a
// differing chunk count (used while relocating after a resize).
// Returns false on any mismatch so the caller re-initializes the TOC.
func (s *Storage) checkJournal(journal []byte, skipMaxChunks bool) bool {
	h := (*journalHeader)(unsafe.Pointer(&journal[0]))
	// check magic bytes / endianess mismatch ('PD' vs 'DP')
	if h.magic != journalMagic {
		s.log.Debugf("Journal magic mismatch: %v != %v", h.magic, journalMagic)
		return false
	}
	// Checksum covers the 12 bytes preceding the checksum field itself.
	checksum := crc32.Checksum(journal[:12], crc32Table)
	if h.checksum != checksum {
		s.log.Debugf("Journal checksum mismatch: %08X != %08X", h.checksum, checksum)
		return false
	}
	if h.version != journalVersion {
		s.log.Debugf("Journal version mismatch: %v != %v", h.version, journalVersion)
		return false
	}
	if h.headerSize != uint8(headerSize) {
		s.log.Debugf("Journal chunk header size mismatch: %v != %v", h.headerSize, headerSize)
		return false
	}
	if !skipMaxChunks && h.maxChunks != uint32(s.MaxChunks) {
		s.log.Debugf("Journal max chunks mismatch: %v != %v", h.maxChunks, s.MaxChunks)
		return false
	}
	if h.chunkSize != uint32(s.ChunkSize) {
		s.log.Debugf("Journal chunk size mismatch: %v != %v", h.chunkSize, s.ChunkSize)
		return false
	}
	s.log.Debug("Journal is valid")
	return true
}
|
||||||
|
|
||||||
|
// initJournal initializes the journal
//
// It writes a fresh journalHeader (magic, version, sizes) into the
// start of journal and seals it with the CRC32-C of the first 12 bytes,
// mirroring the layout that checkJournal validates.
func (s *Storage) initJournal(journal []byte) {
	h := (*journalHeader)(unsafe.Pointer(&journal[0]))
	h.magic = journalMagic
	h.version = journalVersion
	h.headerSize = uint8(headerSize)
	h.maxChunks = uint32(s.MaxChunks)
	h.chunkSize = uint32(s.ChunkSize)
	// Must be set last: it covers all fields written above.
	h.checksum = crc32.Checksum(journal[:12], crc32Table)
}
|
||||||
|
|
||||||
|
// allocateMmapRegions creates memory mappings to fit all chunks
//
// Chunks are packed into regions of at most maxMmapSize bytes; the last
// region is shrunk to hold only the remaining chunks.
func (s *Storage) allocateMmapRegions(maxMmapSize int64) error {
	s.chunksPerRegion = maxMmapSize / s.ChunkSize
	regionSize := s.chunksPerRegion * s.ChunkSize
	numRegions := int64(s.MaxChunks) / s.chunksPerRegion
	remChunks := int64(s.MaxChunks) % s.chunksPerRegion
	if remChunks != 0 {
		numRegions++
	}
	s.mmapRegions = make([][]byte, numRegions)
	for i := int64(0); i < int64(len(s.mmapRegions)); i++ {
		size := regionSize
		// Final region only covers the leftover chunks.
		if i == numRegions-1 && remChunks != 0 {
			size = remChunks * s.ChunkSize
		}
		s.log.Debugf("Allocate mmap region %v/%v with size %v B", i+1, numRegions, size)
		region, err := s.mmap(i*regionSize, size)
		if nil != err {
			s.log.Errorf("failed to mmap region %v/%v with size %v B", i+1, numRegions, size)
			return err
		}
		// Advisory only; failure is logged but not fatal.
		if err := unix.Madvise(region, syscall.MADV_SEQUENTIAL); nil != err {
			s.log.Warnf("Madvise MADV_SEQUENTIAL for region %v/%v failed: %v", i+1, numRegions, err)
		}
		s.mmapRegions[i] = region
	}
	return nil
}
|
||||||
|
|
||||||
|
// mmapChunks slices buffers from mmap regions and loads chunk metadata
//
// Restored chunks and empty slots are collected separately and pushed
// onto the reuse stack with empty slots in front, so they are recycled
// before any restored data. A SIGINT/SIGTERM aborts the (potentially
// long) loading loop.
func (s *Storage) mmapChunks() error {
	start := time.Now()
	empty := list.New()
	restored := list.New()
	loadedChunks := 0
	for i := 0; i < s.MaxChunks; i++ {
		select {
		case sig := <-s.signals:
			s.log.Warnf("Received signal %v, aborting chunk loader", sig)
			return fmt.Errorf("aborted by signal")
		default:
			if loaded, err := s.initChunk(i, empty, restored); nil != err {
				s.log.Errorf("failed to allocate chunk %v: %v", i, err)
				return fmt.Errorf("failed to initialize chunks")
			} else if loaded {
				loadedChunks++
			}
		}
	}
	// Front of the stack is evicted first: empty before restored.
	s.stack.Prepend(restored)
	s.stack.Prepend(empty)
	elapsed := time.Since(start)
	if nil != s.ChunkFile {
		s.log.Infof("Loaded %v/%v cache chunks in %v", loadedChunks, s.MaxChunks, elapsed)
	} else {
		s.log.Infof("Allocated %v cache chunks in %v", s.MaxChunks, elapsed)
	}
	return nil
}
|
||||||
|
|
||||||
|
// initChunk tries to restore a chunk from disk
//
// Slots whose journal header carries a blank id, or that lie beyond the
// restorable range, go onto the empty list; otherwise the slot is
// registered in the id map and appended to restored. The boolean result
// reports whether the chunk was restored.
func (s *Storage) initChunk(index int, empty *list.List, restored *list.List) (bool, error) {
	chunk, err := s.allocateChunk(index)
	if err != nil {
		s.log.Debugf("%v", err)
		return false, err
	}

	s.buffers[index] = chunk

	// id comes from the chunk's journal header (persisted metadata).
	id := chunk.id

	if blankRequestID == id || index >= s.loadChunks {
		chunk.item = empty.PushBack(index)
		// s.log.Tracef("Allocate chunk %v/%v", index+1, s.MaxChunks)
		return false, nil
	}

	chunk.item = restored.PushBack(index)
	// s.log.Tracef("Load chunk %v/%v (restored: %v)", index+1, s.MaxChunks, id)
	s.chunks[id] = index

	return true, nil
}
|
||||||
|
|
||||||
|
// allocateChunk creates a new mmap-backed chunk
//
// The data buffer is a full-capacity slice (three-index) out of the
// chunk's mmap region; the header is a pointer into the shared journal
// mapping at this slot's offset.
func (s *Storage) allocateChunk(index int) (*Chunk, error) {
	region := int64(index) / s.chunksPerRegion
	offset := (int64(index) - region*s.chunksPerRegion) * s.ChunkSize
	// s.log.Tracef("Allocate chunk %v from region %v at offset %v", index+1, region, offset)
	bytes := s.mmapRegions[region][offset : offset+s.ChunkSize : offset+s.ChunkSize]
	headerOffset := index * headerSize
	header := (*chunkHeader)(unsafe.Pointer(&s.journal[headerOffset]))
	chunk := Chunk{
		chunkHeader: header,
		bytes:       bytes,
	}
	return &chunk, nil
}
|
||||||
|
|
||||||
|
func (s *Storage) mmap(offset, size int64) ([]byte, error) {
|
||||||
|
if s.ChunkFile != nil {
|
||||||
|
return unix.Mmap(int(s.ChunkFile.Fd()), offset, int(size), syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)
|
||||||
|
} else {
|
||||||
|
return unix.Mmap(-1, 0, int(size), syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear removes all old chunks on disk (will be called on each program start)
//
// NOTE(review): this is currently a no-op — the stated removal does not
// happen here; confirm whether on-disk cleanup is intentionally handled
// elsewhere (e.g. by the journal restore in mmapChunks).
func (s *Storage) Clear() error {
	return nil
}
|
||||||
|
|
||||||
|
// Load a chunk from ram or creates it
//
// Returns the chunk bytes on a hit, nil on a miss. A chunk that is not
// marked clean is checksum-verified under the write lock; on a bad
// checksum the slot is marked for reuse and nil is returned.
func (s *Storage) Load(id RequestID) []byte {
	s.lock.RLock()
	chunk := s.fetch(id)
	if nil == chunk {
		// s.log.Tracef("Load chunk %v (missing)", id)
		s.lock.RUnlock()
		return nil
	}
	// Clean chunks can be served straight from the read lock.
	if chunk.clean {
		// s.log.Tracef("Load chunk %v (clean)", id)
		defer s.lock.RUnlock()
		return chunk.bytes
	}
	s.lock.RUnlock()
	// Switch to write lock to avoid races on crc verification
	s.lock.Lock()
	defer s.lock.Unlock()
	if chunk.valid(id) {
		s.log.Debugf("Load chunk %v (verified)", id)
		return chunk.bytes
	}
	// Corrupted slot: push it to the front of the stack so it is the
	// next buffer to be overwritten.
	s.log.Warnf("Load chunk %v (bad checksum: %08x <> %08x)", id, chunk.checksum, chunk.calculateChecksum())
	s.stack.Purge(chunk.item)
	return nil
}
|
||||||
|
|
||||||
|
// Store stores a chunk in the RAM and adds it to the disk storage queue
|
||||||
|
func (s *Storage) Store(id RequestID, bytes []byte) (err error) {
|
||||||
|
s.lock.RLock()
|
||||||
|
|
||||||
|
// Avoid storing same chunk multiple times
|
||||||
|
chunk := s.fetch(id)
|
||||||
|
if nil != chunk && chunk.clean {
|
||||||
|
// s.log.Tracef("Create chunk %v (exists: clean)", id)
|
||||||
|
s.lock.RUnlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
s.lock.RUnlock()
|
||||||
|
s.lock.Lock()
|
||||||
|
defer s.lock.Unlock()
|
||||||
|
|
||||||
|
if nil != chunk {
|
||||||
|
if chunk.valid(id) {
|
||||||
|
s.log.Debugf("Create chunk %v (exists: valid)", id)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
s.log.Warnf("Create chunk %v(exists: overwrite)", id)
|
||||||
|
} else {
|
||||||
|
index := s.stack.Pop()
|
||||||
|
if index == -1 {
|
||||||
|
s.log.Debugf("Create chunk %v (failed)", id)
|
||||||
|
return fmt.Errorf("no buffers available")
|
||||||
|
}
|
||||||
|
chunk = s.buffers[index]
|
||||||
|
deleteID := chunk.id
|
||||||
|
if blankRequestID != deleteID {
|
||||||
|
delete(s.chunks, deleteID)
|
||||||
|
s.log.Debugf("Create chunk %v (reused)", id)
|
||||||
|
} else {
|
||||||
|
s.log.Debugf("Create chunk %v (stored)", id)
|
||||||
|
}
|
||||||
|
s.chunks[id] = index
|
||||||
|
chunk.item = s.stack.Push(index)
|
||||||
|
}
|
||||||
|
|
||||||
|
chunk.update(id, bytes)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// fetch chunk and index by id
//
// Returns nil when the id is unknown; otherwise the chunk is marked
// most-recently-used on the stack. Callers must already hold s.lock
// (read or write) — fetch itself does not lock.
func (s *Storage) fetch(id RequestID) *Chunk {
	index, exists := s.chunks[id]
	if !exists {
		return nil
	}
	chunk := s.buffers[index]
	s.stack.Touch(chunk.item)
	return chunk
}
|
||||||
@@ -42,7 +42,7 @@ func (r *HTTPClient) Do(req *http.Request) (*http.Response, error) {
|
|||||||
return resp, err
|
return resp, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewHTTPClient(token string, maxRetries int, c config.ConfigInterface) *HTTPClient {
|
func NewHTTPClient(token string, maxRetries int, cfg config.ConfigInterface) *HTTPClient {
|
||||||
return &HTTPClient{
|
return &HTTPClient{
|
||||||
BearerToken: token,
|
BearerToken: token,
|
||||||
Client: &http.Client{},
|
Client: &http.Client{},
|
||||||
@@ -61,6 +61,6 @@ func NewHTTPClient(token string, maxRetries int, c config.ConfigInterface) *HTTP
|
|||||||
return false
|
return false
|
||||||
},
|
},
|
||||||
log: logutil.NewLogger().Named("client"),
|
log: logutil.NewLogger().Named("client"),
|
||||||
config: c,
|
config: cfg,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
Reference in New Issue
Block a user