reorganize HTTP routes, improve JSON response
parent 112ab0e08f
commit 72e6355869
@@ -16,12 +16,11 @@ files stored in a very complicated directory tree in just 5 minutes.
 ## Features
 
 - Automated cache management. Fill the cache when the starts, or as requests come in.
-- Front end agnostic design.
-- Elasticsearch integration.
 - File browsing API.
 - Download API.
-- Restrict certain files and directories from the download API to prevent users from downloading your entire 100GB+
-  dataset.
+- Admin API.
+- Frontend-agnostic design.
+- Basic searching or Elasticsearch integration.
 
 ## Install
 
@@ -35,7 +34,7 @@ files stored in a very complicated directory tree in just 5 minutes.
 
 By default, it looks for your config in the same directory as the executable: `./config.yml` or `./config.yaml`.
 
-If you're using initial cache and have tons of files to scan you'll need at least 5GB of RAM and will have to wait 10 or
+If you're using initial cache and have tons of files to scan you'll need at least 5GB of RAM and will have to wait 5 or
 so minutes for it to traverse the directory structure. CrazyFS is heavily threaded, so you'll want at least an 8-core
 machine.
 
@@ -1,28 +0,0 @@
-package api
-
-import (
-	"crazyfs/CacheItem"
-	"crazyfs/cache"
-	"crazyfs/cache/DirectoryCrawler"
-	"encoding/json"
-	lru "github.com/hashicorp/golang-lru/v2"
-	"net/http"
-)
-
-// TODO: show the time the initial crawl started
-
-func HealthCheck(w http.ResponseWriter, r *http.Request, sharedCache *lru.Cache[string, *CacheItem.Item]) {
-	//log := logging.GetLogger()
-
-	response := map[string]interface{}{}
-
-	response["scan_running"] = DirectoryCrawler.GetTotalActiveCrawls() > 0
-	response["initial_scan_running"] = cache.InitialCrawlInProgress
-
-	w.Header().Set("Content-Type", "application/json")
-	err := json.NewEncoder(w).Encode(response)
-	if err != nil {
-		log.Errorf("HEALTH - Failed to serialize JSON: %s", err)
-		return
-	}
-}
@@ -1,26 +0,0 @@
-package client
-
-import (
-	"crazyfs/CacheItem"
-	"crazyfs/cache"
-	"crazyfs/cache/DirectoryCrawler"
-	"encoding/json"
-	lru "github.com/hashicorp/golang-lru/v2"
-	"net/http"
-)
-
-// TODO: show the time the initial crawl started
-
-func ClientHealthCheck(w http.ResponseWriter, r *http.Request, sharedCache *lru.Cache[string, *CacheItem.Item]) {
-	response := map[string]interface{}{}
-
-	response["scan_running"] = DirectoryCrawler.GetTotalActiveCrawls() > 0
-	response["initial_scan_running"] = cache.InitialCrawlInProgress
-
-	w.Header().Set("Content-Type", "application/json")
-	err := json.NewEncoder(w).Encode(response)
-	if err != nil {
-		log.Errorf("HEALTH - Failed to serialize JSON: %s", err)
-		return
-	}
-}
@@ -1,22 +0,0 @@
-package client
-
-import (
-	"crazyfs/CacheItem"
-	"crazyfs/config"
-	"encoding/json"
-	lru "github.com/hashicorp/golang-lru/v2"
-	"net/http"
-)
-
-func RestrictedDownloadDirectories(w http.ResponseWriter, r *http.Request, sharedCache *lru.Cache[string, *CacheItem.Item]) {
-	response := map[string]interface{}{
-		"restricted_download_directories": config.GetConfig().RestrictedDownloadPaths,
-	}
-
-	w.Header().Set("Content-Type", "application/json")
-	err := json.NewEncoder(w).Encode(response)
-	if err != nil {
-		log.Errorf("AdminCacheInfo - Failed to serialize JSON: %s", err)
-		return
-	}
-}
@@ -0,0 +1,87 @@
+package helpers
+
+import (
+	"crazyfs/CacheItem"
+	"crazyfs/cache"
+	"crazyfs/cache/DirectoryCrawler"
+	"github.com/hashicorp/golang-lru/v2"
+	"net/http"
+	"os"
+	"time"
+)
+
+// HandleFileNotFound if the data is not in the cache, start a new crawler
+func HandleFileNotFound(relPath string, fullPath string, sharedCache *lru.Cache[string, *CacheItem.Item], w http.ResponseWriter) *CacheItem.Item {
+	// TODO: implement some sort of backoff or delay for repeated calls to recache the same path.
+
+	log.Debugf("CRAWLER - %s not in cache, crawling", fullPath)
+	dc := DirectoryCrawler.NewDirectoryCrawler(sharedCache)
+
+	// Check if this is a symlink. We do this before CrawlNoRecursion() because we want to tell the end user that
+	// we're not going to resolve this symlink.
+	//info, err := os.Lstat(fullPath)
+	//if err != nil {
+	//	log.Errorf("HandleFileNotFound - os.Lstat failed: %s", err)
+	//	Return500Msg(w)
+	//	return nil
+	//}
+	//if !config.FollowSymlinks && info.Mode()&os.ModeSymlink > 0 {
+	//	Return400Msg("path is a symlink", w)
+	//	return nil
+	//}
+
+	// Start a recursive crawl in the background immediately, so we don't risk the client disconnecting before we've had
+	// a chance to kick of a recursive crawl.
+	go func() {
+		log.Debugf("Starting background recursive crawl for %s", fullPath)
+		dc := DirectoryCrawler.NewDirectoryCrawler(sharedCache)
+		start := time.Now()
+		err := dc.Crawl(fullPath)
+		if err != nil {
+			log.Errorf("LIST - background recursive crawl failed: %s", err)
+		}
+		log.Debugf("Finished background recursive crawl for %s, elapsed time: %s", fullPath, time.Since(start).Round(time.Second))
+	}()
+
+	// Start a blocking non-recursive crawl.
+	item, err := dc.CrawlNoRecursion(fullPath)
+	if err == nil && (os.IsNotExist(err) || item == nil) {
+		ReturnFake404Msg("path not found", w)
+		return nil
+	} else if err != nil {
+		log.Errorf("HandleFileNotFound - crawl failed: %s", err)
+		Return500Msg(w)
+		return nil
+	}
+
+	// Try to get the data from the cache again.
+	item, found := sharedCache.Get(relPath)
+	if !found {
+		// TODO: let's not re-check the disk if the file is still not in the cache. Instead, let's just assume that it doesn't exist.
+		ReturnFake404Msg("path not found", w)
+
+		// TODO: this is the old code in case this isn't the right approach.
+		// If the data is still not in the cache, check if the file or directory exists.
+		// We could check if the file exists before checking the cache but we want to limit disk reads.
+		//if _, err := os.Stat(fullPath); os.IsNotExist(err) {
+		//	log.Debugf("File not in cache: %s", fullPath)
+		//	// If the file or directory does not exist, return a 404 status code and a message
+		//	ReturnFake404Msg("file or directory not found", w)
+		//	return nil
+		//} else if err != nil {
+		//	// If there was an error checking if the file or directory exists, return a 500 status code and the error
+		//	log.Errorf("LIST - %s", err.Error())
+		//	Return500Msg(w)
+		//	return nil
+		//}
+	}
+
+	// If CacheItem is still nil, error
+	if item == nil {
+		log.Errorf("LIST - crawler failed to find %s and did not return a 404", relPath)
+		Return500Msg(w)
+		return nil
+	}
+	cache.CheckAndRecache(fullPath, sharedCache)
+	return item
+}
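
For orientation, this new helper is meant to be called by a route handler after a cache miss: it kicks off the crawls, writes any 404/500 response itself, and returns nil when the caller should stop. A minimal sketch of that call pattern, assuming the `AppHandler` signature used by the router; the query parsing and path mapping here are illustrative and not taken from this commit, and serialization uses the `WriteJsonResponse` helper added in the next file.

```go
package routes

import (
	"net/http"
	"path/filepath"

	"crazyfs/CacheItem"
	"crazyfs/api/helpers"
	"crazyfs/config"

	lru "github.com/hashicorp/golang-lru/v2"
)

// exampleHandler is a hypothetical handler; only the HandleFileNotFound and
// WriteJsonResponse calls reflect helpers introduced in this commit.
func exampleHandler(w http.ResponseWriter, r *http.Request, sharedCache *lru.Cache[string, *CacheItem.Item]) {
	relPath := r.URL.Query().Get("path")                           // illustrative: real handlers may derive this differently
	fullPath := filepath.Join(config.GetConfig().RootDir, relPath) // illustrative mapping to an absolute path

	item, found := sharedCache.Get(relPath)
	if !found {
		// On a miss the helper crawls the path and writes an error response itself when needed.
		item = helpers.HandleFileNotFound(relPath, fullPath, sharedCache, w)
		if item == nil {
			return
		}
	}
	helpers.WriteJsonResponse(item, true, w, r)
}
```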
@@ -0,0 +1,36 @@
+package helpers
+
+import (
+	"bytes"
+	"encoding/json"
+	"net/http"
+)
+
+// WriteJsonResponse formats/prettifies the JSON response and handles any errors during transmission.
+func WriteJsonResponse(response any, minified bool, w http.ResponseWriter, r *http.Request) {
+	var jsonResponse []byte
+	var err error
+	if !minified {
+		jsonResponse, err = json.MarshalIndent(response, "", " ")
+	} else {
+		jsonData, err := json.Marshal(response)
+		if err != nil {
+			log.Fatalf("Error marshaling the map: %v", err)
+		} else {
+			var compactedBuffer bytes.Buffer
+			err = json.Compact(&compactedBuffer, jsonData)
+			if err == nil {
+				jsonResponse = compactedBuffer.Bytes()
+			}
+		}
+	}
+	if err != nil {
+		log.Errorf("Failed to serialize JSON: %s - %s", err, r.URL.RequestURI())
+		return
+	}
+	w.Header().Set("Content-Type", "application/json")
+	_, err = w.Write(jsonResponse)
+	if err != nil {
+		log.Errorf("Failed to write JSON response: %s - %s", err, r.URL.RequestURI())
+	}
+}
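
This helper centralizes the Content-Type/`json.NewEncoder`/error-logging boilerplate that the deleted handlers above each repeated by hand. A hedged sketch of how a handler calls it; the payload fields are made up:

```go
package routes

import (
	"net/http"

	"crazyfs/api/helpers"
)

// statusExample is a hypothetical handler; only the WriteJsonResponse call is from this commit.
func statusExample(w http.ResponseWriter, r *http.Request) {
	payload := map[string]interface{}{
		"ok":    true, // illustrative fields, not part of CrazyFS
		"notes": "example",
	}
	// minified=true compacts the output; false pretty-prints via json.MarshalIndent.
	helpers.WriteJsonResponse(payload, true, w, r)
}
```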
@@ -1,93 +1,11 @@
 package helpers
 
 import (
-	"crazyfs/CacheItem"
 	"crazyfs/cache"
-	"crazyfs/cache/DirectoryCrawler"
 	"crazyfs/config"
-	lru "github.com/hashicorp/golang-lru/v2"
-	"net/http"
-	"os"
 	"strconv"
-	"time"
 )
 
-// HandleFileNotFound if the data is not in the cache, start a new crawler
-func HandleFileNotFound(relPath string, fullPath string, sharedCache *lru.Cache[string, *CacheItem.Item], w http.ResponseWriter) *CacheItem.Item {
-	// TODO: implement some sort of backoff or delay for repeated calls to recache the same path.
-
-	log.Debugf("CRAWLER - %s not in cache, crawling", fullPath)
-	dc := DirectoryCrawler.NewDirectoryCrawler(sharedCache)
-
-	// Check if this is a symlink. We do this before CrawlNoRecursion() because we want to tell the end user that
-	// we're not going to resolve this symlink.
-	//info, err := os.Lstat(fullPath)
-	//if err != nil {
-	//	log.Errorf("HandleFileNotFound - os.Lstat failed: %s", err)
-	//	Return500Msg(w)
-	//	return nil
-	//}
-	//if !config.FollowSymlinks && info.Mode()&os.ModeSymlink > 0 {
-	//	Return400Msg("path is a symlink", w)
-	//	return nil
-	//}
-
-	// Start a recursive crawl in the background immediately, so we don't risk the client disconnecting before we've had
-	// a chance to kick of a recursive crawl.
-	go func() {
-		log.Debugf("Starting background recursive crawl for %s", fullPath)
-		dc := DirectoryCrawler.NewDirectoryCrawler(sharedCache)
-		start := time.Now()
-		err := dc.Crawl(fullPath)
-		if err != nil {
-			log.Errorf("LIST - background recursive crawl failed: %s", err)
-		}
-		log.Debugf("Finished background recursive crawl for %s, elapsed time: %s", fullPath, time.Since(start).Round(time.Second))
-	}()
-
-	// Start a blocking non-recursive crawl.
-	item, err := dc.CrawlNoRecursion(fullPath)
-	if err == nil && (os.IsNotExist(err) || item == nil) {
-		ReturnFake404Msg("path not found", w)
-		return nil
-	} else if err != nil {
-		log.Errorf("HandleFileNotFound - crawl failed: %s", err)
-		Return500Msg(w)
-		return nil
-	}
-
-	// Try to get the data from the cache again.
-	item, found := sharedCache.Get(relPath)
-	if !found {
-		// TODO: let's not re-check the disk if the file is still not in the cache. Instead, let's just assume that it doesn't exist.
-		ReturnFake404Msg("path not found", w)
-
-		// TODO: this is the old code in case this isn't the right approach.
-		// If the data is still not in the cache, check if the file or directory exists.
-		// We could check if the file exists before checking the cache but we want to limit disk reads.
-		//if _, err := os.Stat(fullPath); os.IsNotExist(err) {
-		//	log.Debugf("File not in cache: %s", fullPath)
-		//	// If the file or directory does not exist, return a 404 status code and a message
-		//	ReturnFake404Msg("file or directory not found", w)
-		//	return nil
-		//} else if err != nil {
-		//	// If there was an error checking if the file or directory exists, return a 500 status code and the error
-		//	log.Errorf("LIST - %s", err.Error())
-		//	Return500Msg(w)
-		//	return nil
-		//}
-	}
-
-	// If CacheItem is still nil, error
-	if item == nil {
-		log.Errorf("LIST - crawler failed to find %s and did not return a 404", relPath)
-		Return500Msg(w)
-		return nil
-	}
-	cache.CheckAndRecache(fullPath, sharedCache)
-	return item
-}
-
 func IsNonNegativeInt(testStr string) bool {
 	if num, err := strconv.ParseInt(testStr, 10, 64); err == nil {
 		return num >= 0
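
IsNonNegativeInt is the helper left behind in this file after the move. A small, hypothetical illustration of the kind of query-string validation it supports (the not-shown tail of the function presumably returns false when parsing fails):

```go
package routes

import (
	"net/http"

	"crazyfs/api/helpers"
)

// pageParam is a hypothetical example of validating a numeric query parameter.
func pageParam(r *http.Request) (string, bool) {
	p := r.URL.Query().Get("page")
	if !helpers.IsNonNegativeInt(p) {
		return "", false // e.g. "-1" fails the num >= 0 check
	}
	return p, true
}
```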
@@ -2,7 +2,10 @@ package api
 
 import (
 	"crazyfs/CacheItem"
-	"crazyfs/api/client"
+	"crazyfs/api/routes"
+	"crazyfs/api/routes/admin"
+	"crazyfs/api/routes/client"
+	"crazyfs/config"
 	"crazyfs/logging"
 	"encoding/json"
 	"fmt"
@@ -22,96 +25,86 @@ type Routes []Route
 
 type AppHandler func(http.ResponseWriter, *http.Request, *lru.Cache[string, *CacheItem.Item])
 
-var routes = Routes{
+var httpRoutes = Routes{
 	Route{
 		"ListDir",
 		"GET",
 		"/api/file/list",
-		ListDir,
+		routes.ListDir,
 	},
 	Route{
 		"Download",
 		"GET",
 		"/api/file/download",
-		Download,
+		routes.Download,
 	},
 	Route{
 		"Thumbnail",
 		"GET",
 		"/api/file/thumb",
-		Thumbnail,
+		routes.Thumbnail,
 	},
 	Route{
 		"Search",
 		"GET",
 		"/api/search",
-		SearchFile,
+		routes.SearchFile,
 	},
 	Route{
 		"Cache Info",
 		"GET",
 		"/api/admin/cache/info",
-		AdminCacheInfo,
+		admin.AdminCacheInfo,
 	},
 	Route{
 		"Trigger Recache",
 		"POST",
 		"/api/admin/cache/recache",
-		AdminReCache,
+		admin.AdminReCache,
 	},
 	Route{
 		"Trigger Recache",
 		"GET",
 		"/api/admin/cache/recache",
-		wrongMethod("POST", AdminReCache),
+		wrongMethod("POST", admin.AdminReCache),
 	},
 	Route{
 		"Crawls Info",
 		"GET",
 		"/api/admin/crawls/info",
-		AdminCrawlsInfo,
+		admin.AdminCrawlsInfo,
 	},
 	Route{
-		"Server Health",
+		"System Info",
 		"GET",
-		"/api/health",
-		HealthCheck,
+		"/api/admin/sys/info",
+		admin.AdminSysInfo,
 	},
-
-	// TODO: remove
-	Route{
-		"Server Health",
-		"GET",
-		"/api/health",
-		HealthCheck,
-	},
-
 	Route{
 		"Server Health",
 		"GET",
 		"/api/client/health",
-		client.ClientHealthCheck,
+		client.HealthCheck,
 	},
 	Route{
 		"Restricted Directories",
 		"GET",
-		"/api/client/restricted",
-		client.RestrictedDownloadDirectories,
+		"/api/client/restricted-download",
+		client.RestrictedDownloadPaths,
 	},
 }
 
 func setHeaders(next http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		w.Header().Set("Access-Control-Allow-Origin", "*")
+		w.Header().Set("Access-Control-Allow-Origin", config.GetConfig().HTTPAccessControlAllowOriginHeader)
 		w.Header().Set("Server", "crazy-file-server")
-		w.Header().Set("Access-Control-Allow-Origin", "*")
 		next.ServeHTTP(w, r)
 	})
 }
 
 func NewRouter(sharedCache *lru.Cache[string, *CacheItem.Item]) *mux.Router {
 	r := mux.NewRouter().StrictSlash(true)
-	for _, route := range routes {
+	for _, route := range httpRoutes {
 		var handler http.Handler
 
 		// Create a new variable to hold the current route
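
The hunk cuts off before the loop body finishes; presumably the remaining lines wrap each AppHandler in a closure that supplies the shared cache and then register it with gorilla/mux. A sketch of that pattern, with the Route field names (Name, Method, Pattern, HandlerFunc) assumed since the struct definition is not shown here:

```go
package api

import (
	"net/http"

	"crazyfs/CacheItem"

	"github.com/gorilla/mux"
	lru "github.com/hashicorp/golang-lru/v2"
)

// newRouterSketch mirrors what the elided remainder of NewRouter presumably does:
// adapt each AppHandler (which needs the shared cache) into a plain http.Handler.
// Field names on Route are assumptions; the struct is defined outside this hunk.
func newRouterSketch(sharedCache *lru.Cache[string, *CacheItem.Item]) *mux.Router {
	router := mux.NewRouter().StrictSlash(true)
	for _, route := range httpRoutes {
		route := route // capture the loop variable for the closure
		handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			route.HandlerFunc(w, r, sharedCache)
		})
		router.Methods(route.Method).
			Path(route.Pattern).
			Name(route.Name).
			Handler(handler)
	}
	return router
}
```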
@@ -1,4 +1,4 @@
-package api
+package routes
 
 import (
 	"crazyfs/CacheItem"
@@ -1,4 +1,4 @@
-package api
+package routes
 
 import (
 	"crazyfs/CacheItem"
@@ -6,7 +6,6 @@ import (
 	"crazyfs/api/helpers"
 	"crazyfs/config"
 	"crazyfs/file"
-	"encoding/json"
 	lru "github.com/hashicorp/golang-lru/v2"
 	"net/http"
 	"strconv"
@@ -156,7 +155,7 @@ func ListDir(w http.ResponseWriter, r *http.Request, sharedCache *lru.Cache[stri
 		}
 	}
 
-	// Erase the children of the children so we aren't displaying things recursively
+	// Erase the children of the children, so we aren't displaying things recursively.
 	for i := range paginatedChildren {
 		paginatedChildren[i].Children = nil
 	}
@@ -175,11 +174,5 @@ func ListDir(w http.ResponseWriter, r *http.Request, sharedCache *lru.Cache[stri
 		"type": item.Type,
 	}
 
-	w.Header().Set("Cache-Control", "no-store")
-	w.Header().Set("Content-Type", "application/json")
-	err = json.NewEncoder(w).Encode(response)
-	if err != nil {
-		log.Errorf("LIST - Failed to serialize JSON: %s", err)
-		return
-	}
+	helpers.WriteJsonResponse(response, true, w, r)
 }
@@ -1,4 +1,4 @@
-package api
+package routes
 
 import (
 	"crazyfs/CacheItem"
@@ -155,16 +155,10 @@ func SearchFile(w http.ResponseWriter, r *http.Request, sharedCache *lru.Cache[s
 	searchDuration := time.Since(searchStart).Round(time.Second)
 	log.Debugf(`SEARCH - %s - Query: "%s" - Results: %d - Elapsed: %d`, logging.GetRealIP(r), queryString, len(results), searchDuration)
 
-	w.Header().Set("Cache-Control", "no-store")
-	w.Header().Set("Content-Type", "application/json")
-	err := json.NewEncoder(w).Encode(map[string]interface{}{
+	response := map[string]interface{}{
 		"results":    results,
 		"numResults": len(results),
 		"elapsed":    searchDuration,
-	})
-	if err != nil {
-		log.Errorf("SEARCH - Failed to serialize JSON: %s", err)
-		helpers.Return500Msg(w)
-		return
 	}
+	helpers.WriteJsonResponse(response, true, w, r)
 }
@@ -1,4 +1,4 @@
-package api
+package routes
 
 import (
 	"bytes"
@@ -1,4 +1,4 @@
-package api
+package admin
 
 import (
 	"crazyfs/CacheItem"
@@ -1,4 +1,4 @@
-package api
+package admin
 
 import (
 	"crazyfs/CacheItem"
@@ -1,4 +1,4 @@
-package api
+package admin
 
 import (
 	"crazyfs/CacheItem"
@@ -0,0 +1,36 @@
+package admin
+
+import (
+	"crazyfs/CacheItem"
+	"crazyfs/api/helpers"
+	"crazyfs/config"
+	"crazyfs/logging"
+	"crypto/sha256"
+	"crypto/subtle"
+	lru "github.com/hashicorp/golang-lru/v2"
+	"net/http"
+)
+
+func AdminSysInfo(w http.ResponseWriter, r *http.Request, sharedCache *lru.Cache[string, *CacheItem.Item]) {
+	username, password, ok := r.BasicAuth()
+	if ok {
+		usernameHash := sha256.Sum256([]byte(username))
+		passwordHash := sha256.Sum256([]byte(password))
+		expectedUsernameHash := sha256.Sum256([]byte("admin"))
+		expectedPasswordHash := sha256.Sum256([]byte(config.GetConfig().HttpAdminKey))
+		usernameMatch := subtle.ConstantTimeCompare(usernameHash[:], expectedUsernameHash[:]) == 1
+		passwordMatch := subtle.ConstantTimeCompare(passwordHash[:], expectedPasswordHash[:]) == 1
+
+		if !usernameMatch || !passwordMatch {
+			helpers.Return401Msg("unauthorized", w)
+			return
+		} else {
+			response := logging.MemUsage()
+			w.Header().Set("Cache-Control", "no-store")
+			helpers.WriteJsonResponse(response, false, w, r)
+			return
+		}
+	}
+	w.Header().Set("WWW-Authenticate", `Basic realm="restricted", charset="UTF-8"`)
+	helpers.Return401Msg("unauthorized", w)
+}
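
To exercise the new endpoint, a client supplies HTTP basic auth with the username `admin` and the configured `api_admin_key` as the password. A hedged example client; the host, port, and key are placeholders, not values from this commit:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholders: adjust the host/port to your deployment and use your real api_admin_key.
	req, err := http.NewRequest("GET", "http://localhost:8080/api/admin/sys/info", nil)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("admin", "your-api-admin-key")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status) // 401 without valid credentials, 200 with them
	fmt.Println(string(body))
}
```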
@@ -0,0 +1,12 @@
+package admin
+
+import (
+	"crazyfs/logging"
+	"github.com/sirupsen/logrus"
+)
+
+var log *logrus.Logger
+
+func init() {
+	log = logging.GetLogger()
+}
@@ -0,0 +1,19 @@
+package client
+
+import (
+	"crazyfs/CacheItem"
+	"crazyfs/api/helpers"
+	"crazyfs/cache"
+	"crazyfs/cache/DirectoryCrawler"
+	lru "github.com/hashicorp/golang-lru/v2"
+	"net/http"
+)
+
+// TODO: show the time the initial crawl started
+
+func HealthCheck(w http.ResponseWriter, r *http.Request, sharedCache *lru.Cache[string, *CacheItem.Item]) {
+	response := map[string]interface{}{}
+	response["scanRunning"] = DirectoryCrawler.GetTotalActiveCrawls() > 0
+	response["initialScanRunning"] = cache.InitialCrawlInProgress
+	helpers.WriteJsonResponse(response, false, w, r)
+}
@@ -0,0 +1,14 @@
+package client
+
+import (
+	"crazyfs/CacheItem"
+	"crazyfs/api/helpers"
+	"crazyfs/config"
+	lru "github.com/hashicorp/golang-lru/v2"
+	"net/http"
+)
+
+func RestrictedDownloadPaths(w http.ResponseWriter, r *http.Request, sharedCache *lru.Cache[string, *CacheItem.Item]) {
+	response := config.GetConfig().RestrictedDownloadPaths
+	helpers.WriteJsonResponse(response, false, w, r)
+}
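
Note the response shape: the deleted client.RestrictedDownloadDirectories wrapped the list in a `restricted_download_directories` object, while this handler serializes the configured slice directly, so clients should expect a bare JSON array. A hedged client-side sketch; the URL is a placeholder:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Placeholder URL; substitute your server's address and configured http_port.
	resp, err := http.Get("http://localhost:8080/api/client/restricted-download")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var restricted []string // the handler serializes RestrictedDownloadPaths, a []string
	if err := json.NewDecoder(resp.Body).Decode(&restricted); err != nil {
		panic(err)
	}
	fmt.Println(restricted)
}
```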
@@ -0,0 +1,12 @@
+package routes
+
+import (
+	"crazyfs/logging"
+	"github.com/sirupsen/logrus"
+)
+
+var log *logrus.Logger
+
+func init() {
+	log = logging.GetLogger()
+}
@@ -10,39 +10,40 @@ import (
 var cfg *Config
 
 type Config struct {
 	RootDir string
 	HTTPPort string
 	CrawlModeCrawlInterval int
 	DirectoryCrawlers int
 	CacheSize int
 	CacheTime int
 	CachePrintNew bool
 	InitialCrawl bool
 	CacheRecacheCrawlerLimit int
 	CrawlerParseMIME bool
 	CrawlerParseEncoding bool
 	HttpAPIListCacheControl int
 	HttpAPIDlCacheControl int
 	HttpAllowDirMimeParse bool
 	HttpAdminKey string
 	HttpAllowDuringInitialCrawl bool
 	RestrictedDownloadPaths []string
 	ApiSearchMaxResults int
 	ApiSearchShowChildren bool
 	ElasticsearchEnable bool
 	ElasticsearchEndpoint string
 	ElasticsearchSyncEnable bool
 	ElasticsearchSyncInterval int
 	ElasticsearchFullSyncInterval int
 	ElasticsearchAPIKey string
 	ElasticsearchIndex string
 	ElasticsearchSyncThreads int
 	ElasticsearchExcludePatterns []string
 	ElasticsearchAllowConcurrentSyncs bool
 	ElasticsearchFullSyncOnStart bool
 	ElasticsearchDefaultQueryField string
 	HTTPRealIPHeader string
 	HTTPNoMimeSniffHeader bool
+	HTTPAccessControlAllowOriginHeader string
 }
 
 func SetConfig(configFile string) (*Config, error) {
@@ -88,6 +89,7 @@ func SetConfig(configFile string) (*Config, error) {
 	viper.SetDefault("elasticsearch_default_query_field", "name")
 	viper.SetDefault("http_real_ip_header", "X-Forwarded-For")
 	viper.SetDefault("http_no_mime_sniff_header", false)
+	viper.SetDefault("http_access_control_allow_origin_header", "*")
 
 	err := viper.ReadInConfig()
 	if err != nil {
@@ -107,39 +109,40 @@ func SetConfig(configFile string) (*Config, error) {
 	}
 
 	config := &Config{
 		RootDir: rootDir,
 		HTTPPort: viper.GetString("http_port"),
 		CrawlModeCrawlInterval: viper.GetInt("crawl_mode_crawl_interval"),
 		DirectoryCrawlers: viper.GetInt("directory_crawlers"),
 		CacheSize: viper.GetInt("cache_size"),
 		CacheTime: viper.GetInt("cache_time"),
 		CachePrintNew: viper.GetBool("cache_print_new"),
 		InitialCrawl: viper.GetBool("initial_crawl"),
 		CacheRecacheCrawlerLimit: viper.GetInt("cache_recache_crawler_limit"),
 		CrawlerParseMIME: viper.GetBool("crawler_parse_mime"),
 		CrawlerParseEncoding: viper.GetBool("crawler_parse_encoding"),
 		HttpAPIListCacheControl: viper.GetInt("http_api_list_cache_control"),
 		HttpAPIDlCacheControl: viper.GetInt("http_api_download_cache_control"),
 		HttpAllowDirMimeParse: viper.GetBool("http_allow_dir_mime_parse"),
 		HttpAdminKey: viper.GetString("api_admin_key"),
 		HttpAllowDuringInitialCrawl: viper.GetBool("http_allow_during_initial_crawl"),
 		RestrictedDownloadPaths: restrictedPaths,
 		ApiSearchMaxResults: viper.GetInt("api_search_max_results"),
 		ApiSearchShowChildren: viper.GetBool("api_search_show_children"),
 		ElasticsearchEnable: viper.GetBool("elasticsearch_enable"),
 		ElasticsearchEndpoint: viper.GetString("elasticsearch_endpoint"),
 		ElasticsearchSyncEnable: viper.GetBool("elasticsearch_sync_enable"),
 		ElasticsearchSyncInterval: viper.GetInt("elasticsearch_sync_interval"),
 		ElasticsearchFullSyncInterval: viper.GetInt("elasticsearch_full_sync_interval"),
 		ElasticsearchAPIKey: viper.GetString("elasticsearch_api_key"),
 		ElasticsearchIndex: viper.GetString("elasticsearch_index"),
 		ElasticsearchSyncThreads: viper.GetInt("elasticsearch_sync_threads"),
 		ElasticsearchExcludePatterns: viper.GetStringSlice("elasticsearch_exclude_patterns"),
 		ElasticsearchAllowConcurrentSyncs: viper.GetBool("elasticsearch_allow_concurrent_syncs"),
 		ElasticsearchFullSyncOnStart: viper.GetBool("elasticsearch_full_sync_on_start"),
 		ElasticsearchDefaultQueryField: viper.GetString("elasticsearch_default_query_field"),
 		HTTPRealIPHeader: viper.GetString("http_real_ip_header"),
 		HTTPNoMimeSniffHeader: viper.GetBool("http_no_mime_sniff_header"),
+		HTTPAccessControlAllowOriginHeader: viper.GetString("http_access_control_allow_origin_header"),
 	}
 
 	if config.CacheTime < 0 {
@@ -5,6 +5,7 @@ import (
 	"net"
 	"net/http"
 	"strings"
+	"time"
 )
 
 type statusWriter struct {
@@ -39,10 +40,16 @@ func GetRealIP(r *http.Request) string {
 func LogRequest(handler http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		sw := statusWriter{ResponseWriter: w, status: http.StatusOK} // set default status
+
+		start := time.Now() // record the start time
 		handler.ServeHTTP(&sw, r)
+		end := time.Now() // record the end time
+
+		// calculate the duration
+		duration := end.Sub(start)
 
 		ip := GetRealIP(r)
-		log.Infof("%s - %d - %s from %s", r.Method, sw.status, r.URL.RequestURI(), ip)
+		log.Infof("%s - %d - %s from %s took %v", r.Method, sw.status, r.URL.RequestURI(), ip, duration)
 	})
 }
 
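
The statusWriter type is only partially visible in this hunk. For readers of the log line above, a status-capturing writer of this kind typically looks like the following sketch; the field and method details are assumptions, not code from this commit:

```go
package logging

import "net/http"

// statusWriterSketch illustrates the usual capturing-writer pattern that
// LogRequest relies on; the real statusWriter is defined outside this hunk.
type statusWriterSketch struct {
	http.ResponseWriter
	status int
}

func (w *statusWriterSketch) WriteHeader(code int) {
	w.status = code // remember the status so the request log can report it
	w.ResponseWriter.WriteHeader(code)
}
```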
@@ -0,0 +1,38 @@
+package logging
+
+import (
+	"fmt"
+	"runtime"
+)
+
+func MemUsage() map[string]interface{} {
+	// https://golang.org/pkg/runtime/#MemStats
+	var m runtime.MemStats
+	runtime.ReadMemStats(&m)
+	return map[string]interface{}{
+		"numGC": m.NumGC, // number of completed GC cycles
+		"machine": map[string]interface{}{
+			"allocated":      m.Alloc,      // size allocated heap objects
+			"totalAllocated": m.TotalAlloc, // cumulative size allocated for heap objects
+			"sys":            m.Sys,        // total memory obtained from the OS
+		},
+		"human": map[string]interface{}{
+			"allocated":      humanReadable(m.Alloc),
+			"totalAllocated": humanReadable(m.TotalAlloc),
+			"sys":            humanReadable(m.Sys),
+		},
+	}
+}
+
+func humanReadable(b uint64) string {
+	const unit = 1024
+	if b < unit {
+		return fmt.Sprintf("%d B", b)
+	}
+	div, exp := uint64(unit), 0
+	for n := b / unit; n >= unit; n /= unit {
+		div *= unit
+		exp++
+	}
+	return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "KMGTPE"[exp])
+}
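
As a sanity check on the 1024-based formatting, an illustrative test (not part of the commit) whose expected strings follow directly from humanReadable's logic:

```go
package logging

import "testing"

// TestHumanReadable exercises humanReadable with sample sizes; the expected
// values follow from the 1024-based units and %.1f formatting above.
func TestHumanReadable(t *testing.T) {
	cases := map[uint64]string{
		512:        "512 B",
		1536:       "1.5 KB",
		1048576:    "1.0 MB",
		5368709120: "5.0 GB",
	}
	for in, want := range cases {
		if got := humanReadable(in); got != want {
			t.Errorf("humanReadable(%d) = %q, want %q", in, got, want)
		}
	}
}
```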