redo all logging messages, minor qol
parent 7fa7c362d5
commit 4200d4c710
@@ -39,3 +39,6 @@ so minutes for it to traverse the directory structure. CrazyFS is heavily thread
 machine.
 
 You'll need something like Nginx if you want SSL or HTTP. Also, CrazyFS works great with an HTTP cache in front of it.
+
+## To Do
+- [ ] Remove symlink support.
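The README's reverse-proxy advice is the kind of setup sketched below. This is a minimal illustration only: the hostname, certificate paths, upstream port, and cache sizing are assumptions, not part of this commit or of CrazyFS's documentation.

```nginx
# Hypothetical sketch: TLS termination and an HTTP cache in front of CrazyFS.
# Every value here (port 8080, paths, sizes) is an assumed example.
proxy_cache_path /var/cache/nginx/crazyfs levels=1:2 keys_zone=crazyfs:10m max_size=1g;

server {
    listen 443 ssl;
    server_name files.example.com;

    ssl_certificate     /etc/ssl/certs/files.example.com.pem;
    ssl_certificate_key /etc/ssl/private/files.example.com.key;

    location / {
        proxy_pass http://127.0.0.1:8080; # assumed CrazyFS listen address
        proxy_cache crazyfs;
        proxy_cache_valid 200 1m;
    }
}
```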
@@ -13,7 +13,7 @@ import (
 func NewItem(fullPath string, info os.FileInfo) *Item {
 	RetardCheck(fullPath)
 	if config.GetConfig().CachePrintNew {
-		log.Debugf("CACHE - new: %s", fullPath)
+		log.Debugf("CACHEITEM:New - New cache item: %s", fullPath)
 	}
 
 	pathExists, _ := file.PathExists(fullPath)
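The change above is the shape of every logging edit in this commit: messages move from a loose `SUBSYSTEM - message` style to a `COMPONENT:Function - Message` prefix. A minimal sketch of the convention, assuming a logrus-style logger like the `log` used throughout (the snippet itself is illustrative, not project code):

```go
package main

import log "github.com/sirupsen/logrus"

func main() {
	log.SetLevel(log.DebugLevel)
	fullPath := "/srv/files/example.txt"

	// Old style: bare subsystem tag, inconsistent casing.
	log.Debugf("CACHE - new: %s", fullPath)

	// New style: COMPONENT:Function prefix, then a capitalized message,
	// so a log line can be traced straight back to its call site.
	log.Debugf("CACHEITEM:New - New cache item: %s", fullPath)
}
```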
@@ -22,7 +22,7 @@ func NewItem(fullPath string, info os.FileInfo) *Item {
 		// Ignore symlinks
 		return nil
 	} else {
-		log.Warnf("NewItem - StartPath does not exist: %s", fullPath)
+		log.Warnf("CACHEITEM:New - Path does not exist: %s", fullPath)
 		return nil
 	}
 }
@@ -48,16 +48,17 @@ func NewItem(fullPath string, info os.FileInfo) *Item {
 	if config.GetConfig().CrawlerParseEncoding {
 		encoding, err = file.DetectFileEncoding(fullPath)
 		if err != nil {
-			log.Warnf("ITEM - Error detecting file encoding of file %s - %v", fullPath, err)
+			log.Warnf("CACHEITEM:New - Error detecting file encoding of file %s - %v", fullPath, err)
 			encoding = "utf-8" // fall back to utf-8
 		}
 	}
 
 	// Catch any errors caused by detecting the MIME.
 	if os.IsNotExist(err) {
-		log.Warnf("StartPath does not exist: %s", fullPath)
+		log.Warnf("CACHEITEM:New - Cannot detect MIME: path does not exist: %s", fullPath)
 		return nil
 	} else if err != nil {
-		log.Warnf("Error detecting MIME type of file %s - %v", fullPath, err)
+		log.Warnf("CACHEITEM:New - Error detecting MIME type of file %s - %v", fullPath, err)
 	}
 }
@@ -19,7 +19,7 @@ func (dc *DirectoryCrawler) walkRecursiveFunc(fullPath string, info os.FileInfo,
 	CacheItem.RetardCheck(fullPath)
 	processErr := dc.processPath(fullPath, info)
 	if processErr != nil {
-		log.Errorf("CRAWLER - walkRecursiveFunc() failed - %s - %s", processErr, fullPath)
+		log.Errorf(`CRAWLER:walkRecursiveFunc - failed on "%s": %s`, fullPath, processErr)
 		return processErr
 	}
 	return nil
@@ -29,12 +29,12 @@ func (dc *DirectoryCrawler) walkNonRecursiveFunc(fullPath string, dir os.DirEntr
 	CacheItem.RetardCheck(fullPath)
 	info, infoErr := dir.Info()
 	if infoErr != nil {
-		log.Errorf("CRAWLER - walkNonRecursiveFunc() - get info failed - %s - %s", infoErr, fullPath)
+		log.Errorf(`CRAWLER:walkNonRecursiveFunc - Get info failed on "%s": %s`, fullPath, infoErr)
 		return infoErr
 	}
 	processErr := dc.processPath(fullPath, info)
 	if processErr != nil {
-		log.Errorf("CRAWLER - walkNonRecursiveFunc() failed - %s - %s", processErr, fullPath)
+		log.Errorf(`CRAWLER:walkNonRecursiveFunc - Failed on "%s": %s`, fullPath, processErr)
 		return processErr
 	}
 	return nil
@@ -68,7 +68,7 @@ func (dc *DirectoryCrawler) Crawl(fullPath string, walkFunc func(string, os.File
 		return err
 	}
 	if err != nil {
-		log.Errorf("CRAWLER - Crawl() - os.Lstat() failed - %s", err)
+		log.Errorf(`CRAWLER:Crawl - os.Lstat() failed on "%s": %s`, fullPath, err)
 		return err
 	}
@@ -93,9 +93,8 @@ func (dc *DirectoryCrawler) Crawl(fullPath string, walkFunc func(string, os.File
 		// If the path is a directory, start a walk
 		err := Workers.Walk(fullPath, config.FollowSymlinks, walkFunc)
 		if err != nil {
-			log.Errorf("CRAWLER - crawl for %s failed: %s", fullPath, err)
+			log.Errorf(`CRAWLER:Crawl - Crawl for "%s" failed: %s`, fullPath, err)
 		}
-
 	} else {
 		// If the path is a file, add it to the cache directly
 		dc.AddCacheItem(fullPath, info)
@@ -118,7 +117,7 @@ func (dc *DirectoryCrawler) CrawlNoRecursion(fullPath string) (*CacheItem.Item,
 		return nil, err
 	}
 	if err != nil {
-		log.Errorf("CRAWLER - CrawlNoRecursion() - os.Lstat() failed - %s", err)
+		log.Errorf(`CRAWLER:CrawlNoRecursion - os.Lstat() failed on "%s": %s`, fullPath, err)
 		return nil, err
 	}
@@ -149,7 +148,7 @@ func (dc *DirectoryCrawler) CrawlNoRecursion(fullPath string) (*CacheItem.Item,
 
 	err := filepath.WalkDir(fullPath, dc.walkNonRecursiveFunc)
 	if err != nil {
-		log.Errorf("CRAWLER - non-recursive crawl for %s failed: %s", fullPath, err)
+		log.Errorf(`CRAWLER:CrawlNoRecursion - Crawl for "%s" failed: %s`, fullPath, err)
 		return nil, err
 	}
 	item, _ = SharedCache.Cache.Get(relPath)
@@ -19,7 +19,8 @@ func (dc *DirectoryCrawler) processPath(fullPath string, info os.FileInfo) error
 	children, err := os.ReadDir(fullPath)
 	if err != nil {
-		log.Errorf("CRAWLER - processPath() failed to read directory %s: %s", fullPath, err)
+		log.Errorf(`CRAWLER:processPath - Failed to read directory "%s": %s`, fullPath, err)
 		return err
 	}
 
 	for _, entry := range children {
@@ -81,15 +81,16 @@ func NewResponseItem(cacheItem *CacheItem.Item) *ResponseItem {
 		// because that would be an extra os.Lstat() call in processPath().
 		if !found {
 			crawlRelPath := filepath.Join(config.GetConfig().RootDir, child)
-			log.Debugf(`CRAWLER - "%s" ("%s") not in cache, crawling.`, child, crawlRelPath)
+			// TODO: when does this get triggered?
+			log.Debugf(`NewResponseItem:Crawl - Not in cache, crawling: "%s" ("%s")`, child, crawlRelPath)
 			dc := DirectoryCrawler.NewDirectoryCrawler()
 			item, err := dc.CrawlNoRecursion(crawlRelPath)
 			if err != nil {
-				log.Errorf("NewResponseItem - CrawlNoRecursion - %s", err)
+				log.Errorf("NewResponseItem:Crawl - %s", err)
 				continue // skip this child
 			}
 			if item == nil {
-				log.Debugf(`NewResponseItem - CrawlNoRecursion - not found: "%s". Likely broken symlink`, child)
+				log.Debugf(`NewResponseItem:Crawl - Not found: "%s". Likely broken symlink`, child)
 				continue
 			}
 			childItem = item // Update the `childItem` var with the newly cached item.
@@ -32,7 +32,7 @@ func (w *Walker) processPath(relPath string) error {
 	fullPath := filepath.Join(w.root, relPath)
 	names, err := readDirNames(fullPath)
 	if err != nil {
-		log.Errorf("Walker - processPath - readDirNames - %s", err)
+		log.Errorf("Walker:processPath:readDirNames - %s", err)
 		return err
 	}
@@ -42,12 +42,12 @@ func (w *Walker) processPath(relPath string) error {
 		info, err := w.lstat(subPath)
 
 		if err != nil {
-			log.Warnf("processPath - %s - %s", relPath, err)
+			log.Warnf("Walker:processPath - %s - %s", relPath, err)
 			continue
 		}
 
 		if info == nil {
-			log.Warnf("processPath - %s - %s", relPath, err)
+			log.Warnf("Walker:processPath - %s - %s", relPath, err)
 			continue
 		}
@@ -13,14 +13,14 @@ import (
 func HandleFileNotFound(relPath string, fullPath string, w http.ResponseWriter) *CacheItem.Item {
 	// TODO: implement some sort of backoff or delay for repeated calls to recache the same path.
 
-	log.Debugf("CRAWLER - %s not in cache, crawling", fullPath)
+	log.Debugf(`HELPERS:HandleFileNotFound:Crawl - Not in cache, crawling: "%s"`, fullPath)
 	dc := DirectoryCrawler.NewDirectoryCrawler()
 
 	// Check if this is a symlink. We do this before CrawlNoRecursion() because we want to tell the end user that
 	// we're not going to resolve this symlink.
 	//info, err := os.Lstat(fullPath)
 	//if err != nil {
-	//	log.Errorf("HandleFileNotFound - os.Lstat failed: %s", err)
+	//	log.Errorf("HELPERS:HandleFileNotFound - os.Lstat failed: %s", err)
 	//	Return500Msg(w)
 	//	return nil
 	//}
@@ -39,7 +39,7 @@ func HandleFileNotFound(relPath string, fullPath string, w http.ResponseWriter)
 		if os.IsNotExist(err) {
 			ReturnFake404Msg("path not found", w)
 		} else {
-			log.Errorf("HandleFileNotFound - crawl failed: %s", err)
+			log.Errorf("HELPERS:HandleFileNotFound:Crawl - Crawl failed: %s", err)
 			Return500Msg(w)
 		}
 		return nil
@@ -69,7 +69,7 @@ func HandleFileNotFound(relPath string, fullPath string, w http.ResponseWriter)
 
 	// If CacheItem is still nil, error
 	if item == nil {
-		log.Errorf("LIST - crawler failed to find %s and did not return a 404", relPath)
+		log.Errorf("HELPERS:HandleFileNotFound:Crawl - Failed to find %s and did not return a 404", relPath)
 		Return500Msg(w)
 		return nil
 	}
@@ -78,18 +78,19 @@ func HandleFileNotFound(relPath string, fullPath string, w http.ResponseWriter)
 	//cache.CheckAndRecache(fullPath)
 
 	duration := time.Since(start).Round(time.Second)
-	log.Debugf(`LIST - took %s to find the missing path "%s"`, duration, relPath)
+	log.Debugf(`HandleFileNotFound:Crawl - Took %s to find the missing path "%s"`, duration, relPath)
 
 	// Start a recursive crawl in the background.
 	go func() {
-		log.Debugf("Starting background recursive crawl for %s", fullPath)
+		log.Debugf("HELPERS:HandleFileNotFound:Crawl - Starting background recursive crawl for %s", fullPath)
 		dc := DirectoryCrawler.NewDirectoryCrawler()
 		start := time.Now()
 		err := dc.Crawl(fullPath, nil)
 		if err != nil {
-			log.Errorf("LIST - background recursive crawl failed: %s", err)
+			log.Errorf("HELPERS:HandleFileNotFound:Crawl - Background recursive crawl failed: %s", err)
+		} else {
+			log.Debugf("HELPERS:HandleFileNotFound:Crawl - Finished background recursive crawl for %s, elapsed time: %s", fullPath, time.Since(start).Round(time.Second))
 		}
-		log.Debugf("Finished background recursive crawl for %s, elapsed time: %s", fullPath, time.Since(start).Round(time.Second))
 	}()
 
 	return item
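Besides renaming, this hunk moves the "finished" message into an `else` branch so it no longer logs after a failed crawl. A self-contained sketch of the corrected pattern (`crawl` is a stub standing in for `dc.Crawl`):

```go
package main

import (
	"time"

	log "github.com/sirupsen/logrus"
)

// crawl is a stub standing in for dc.Crawl from the diff.
func crawl(path string) error {
	time.Sleep(50 * time.Millisecond)
	return nil
}

func main() {
	log.SetLevel(log.DebugLevel)
	fullPath := "/srv/files"
	done := make(chan struct{})

	// Background crawl: time the work and only log "finished" on success.
	go func() {
		defer close(done)
		start := time.Now()
		if err := crawl(fullPath); err != nil {
			log.Errorf("Background recursive crawl failed: %s", err)
		} else {
			log.Debugf("Finished background recursive crawl for %s, elapsed time: %s",
				fullPath, time.Since(start).Round(time.Second))
		}
	}()
	<-done
}
```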
@@ -15,7 +15,7 @@ func WriteJsonResponse(response any, minified bool, w http.ResponseWriter, r *ht
 	} else {
 		jsonData, err := json.Marshal(response)
 		if err != nil {
-			log.Fatalf("Error marshaling the map: %v", err)
+			log.Fatalf("HELPERS:WriteJsonResponse - Error marshaling the map: %v", err)
 		} else {
 			var compactedBuffer bytes.Buffer
 			err = json.Compact(&compactedBuffer, jsonData)
@@ -25,12 +25,12 @@ func WriteJsonResponse(response any, minified bool, w http.ResponseWriter, r *ht
 		}
 	}
 	if err != nil {
-		log.Errorf("Failed to serialize JSON: %s - %s", err, r.URL.RequestURI())
+		log.Errorf("HELPERS:WriteJsonResponse - Failed to serialize JSON: %s - %s", err, r.URL.RequestURI())
 		return
 	}
 	w.Header().Set("Content-Type", "application/json")
 	_, err = w.Write(jsonResponse)
 	if err != nil {
-		log.Errorf("Failed to write JSON response: %s - %s", err, r.URL.RequestURI())
+		log.Errorf("HELPERS:WriteJsonResponse - Failed to write JSON response: %s - %s", err, r.URL.RequestURI())
 	}
 }
@@ -16,7 +16,7 @@ func WriteErrorResponse(jsonCode, httpCode int, msg string, w http.ResponseWrite
 	})
 
 	if err != nil {
-		log.Errorln("HELPERS - WriteErrorResponse failed to encode JSON response: ", err)
+		log.Errorln("HELPERS:WriteErrorResponse - Failed to encode JSON response: ", err)
 	}
 }
@@ -44,7 +44,7 @@ func ZipHandlerCompress(dirPath string, w http.ResponseWriter, r *http.Request)
 
 	err := zipWriter.Close()
 	if err != nil {
-		log.Errorf("ZIPSTREM - failed to close zipwriter: %s", err)
+		log.Errorf("HELPERS:ZipHandlerCompress - Failed to close zipwriter: %s", err)
 	}
 }
 func ZipHandlerCompressMultiple(paths []string, w http.ResponseWriter, r *http.Request) {
@@ -118,7 +118,7 @@ func ZipHandlerCompressMultiple(paths []string, w http.ResponseWriter, r *http.R
 
 	err := zipWriter.Close()
 	if err != nil {
-		log.Errorf("ZIPSTREM - failed to close zipwriter: %s", err)
+		log.Errorf("HELPERS:ZipHandlerCompressMultiple - Failed to close zipwriter: %s", err)
 		return
 	}
 }
@@ -26,70 +26,70 @@ type AppHandler func(http.ResponseWriter, *http.Request)
 
 var httpRoutes = Routes{
 	Route{
-		"ListDir",
+		"APIList",
 		"GET",
 		"/api/file/list",
-		file.ListDir,
+		file.APIList,
 	},
 	Route{
-		"Download",
+		"APIDownload",
 		"GET",
 		"/api/file/download",
-		file.Download,
+		file.APIDownload,
 	},
 	Route{
-		"Thumbnail",
+		"APIThumbnail",
 		"GET",
 		"/api/file/thumb",
-		file.Thumbnail,
+		file.APIThumbnail,
 	},
 	Route{
-		"Search",
+		"APISearch",
 		"GET",
 		"/api/search",
-		routes.SearchFile,
+		routes.APISearch,
 	},
 	Route{
 		"Cache Info",
 		"GET",
 		"/api/admin/cache/info",
-		admin.AdminCacheInfo,
+		admin.APIAdminCacheInfo,
 	},
 	Route{
 		"Trigger Recache",
 		"POST",
 		"/api/admin/cache/recache",
-		admin.AdminReCache,
+		admin.APIAdminRecache,
 	},
 	Route{
 		"Trigger Recache",
 		"GET",
 		"/api/admin/cache/recache",
-		wrongMethod("POST", admin.AdminReCache),
+		wrongMethod("POST", admin.APIAdminRecache),
 	},
 	Route{
 		"Crawls Info",
 		"GET",
 		"/api/admin/crawls/info",
-		admin.AdminCrawlsInfo,
+		admin.APIAdminCrawlsInfo,
 	},
 	Route{
 		"System Info",
 		"GET",
 		"/api/admin/sys/info",
-		admin.AdminSysInfo,
+		admin.APIAdminSysInfo,
 	},
 	Route{
 		"Server Health",
 		"GET",
 		"/api/client/health",
-		client.HealthCheck,
+		client.APIHealthCheck,
 	},
 	Route{
 		"Restricted Directories",
 		"GET",
 		"/api/client/restricted-download",
-		client.RestrictedDownloadPaths,
+		client.APIRestrictedDownloadPaths,
 	},
 }
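`wrongMethod` wraps the recache handler so a GET against the POST-only endpoint gets a helpful error instead of triggering a recache. Its body is not shown in this diff; the sketch below is a plausible guess at such a wrapper, reusing the file's own `AppHandler` type and the standard `net/http` package:

```go
// Hypothetical sketch of wrongMethod; the real implementation is not
// part of this diff. It discards the wrapped handler and tells the
// caller which HTTP verb the endpoint expects.
func wrongMethod(expectedMethod string, handler AppHandler) AppHandler {
	return func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Allow", expectedMethod)
		http.Error(w, "this endpoint requires "+expectedMethod, http.StatusMethodNotAllowed)
	}
}
```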
@@ -15,7 +15,7 @@ import (
 	"time"
 )
 
-func SearchFile(w http.ResponseWriter, r *http.Request) {
+func APISearch(w http.ResponseWriter, r *http.Request) {
 	if helpers.CheckInitialCrawl() {
 		helpers.HandleRejectDuringInitialCrawl(w)
 		return
@@ -72,7 +72,7 @@ func SearchFile(w http.ResponseWriter, r *http.Request) {
 	// Perform the Elasticsearch query
 	resp, err := elastic.SimpleQuery(queryString, excludeElements)
 	if err != nil {
-		log.Errorf(`SEARCH - Failed to perform Elasticsearch query "%s" - %s`, queryString, err)
+		log.Errorf(`ROUTES:APISearch - Failed to perform Elasticsearch query "%s" - %s`, queryString, err)
 		helpers.Return500Msg(w)
 		return
 	}
@@ -81,7 +81,7 @@ func SearchFile(w http.ResponseWriter, r *http.Request) {
 	var respData map[string]interface{}
 	err = json.NewDecoder(resp.Body).Decode(&respData)
 	if err != nil {
-		log.Errorf(`SEARCH - Failed to parse Elasticsearch response for query "%s" - %s`, queryString, err)
+		log.Errorf(`ROUTES:APISearch - Failed to parse Elasticsearch response for query "%s" - %s`, queryString, err)
 		helpers.Return500Msg(w)
 		return
 	}
@@ -152,7 +152,7 @@ func SearchFile(w http.ResponseWriter, r *http.Request) {
 	}
 
 	searchDuration := time.Since(searchStart) // .Round(time.Second)
-	log.Debugf(`SEARCH - %s - Query: "%s" - Results: %d - Elapsed: %d`, logging.GetRealIP(r), queryString, len(results), searchDuration)
+	log.Debugf(`ROUTES:APISearch - %s - Query: "%s" - Results: %d - Elapsed: %d`, logging.GetRealIP(r), queryString, len(results), searchDuration)
 
 	response := map[string]interface{}{
 		"results": results,
@@ -9,7 +9,7 @@ import (
 	"net/http"
 )
 
-func AdminCacheInfo(w http.ResponseWriter, r *http.Request) {
+func APIAdminCacheInfo(w http.ResponseWriter, r *http.Request) {
 	username, password, ok := r.BasicAuth()
 	if ok {
 		usernameHash := sha256.Sum256([]byte(username))
@@ -12,7 +12,7 @@ import (
 	"sync/atomic"
 )
 
-func AdminCrawlsInfo(w http.ResponseWriter, r *http.Request) {
+func APIAdminCrawlsInfo(w http.ResponseWriter, r *http.Request) {
 	username, password, ok := r.BasicAuth()
 	if ok {
 		usernameHash := sha256.Sum256([]byte(username))
@@ -10,7 +10,7 @@ import (
 	"net/http"
 )
 
-func AdminReCache(w http.ResponseWriter, r *http.Request) {
+func APIAdminRecache(w http.ResponseWriter, r *http.Request) {
 	if r.Method != http.MethodPost {
 		helpers.Return400Msg("this is a POST endpoint", w)
 		return
@@ -24,7 +24,7 @@ func AdminReCache(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	// TODO: use basic auth
+	// TODO: use basic auth
 	auth := requestBody["auth"]
 	if auth == "" || auth != config.GetConfig().HttpAdminKey {
 		helpers.Return401Msg("unauthorized", w)
@@ -37,7 +37,7 @@ func AdminReCache(w http.ResponseWriter, r *http.Request) {
 	fullPath, errJoin := file.SafeJoin(pathArg)
 	traversalAttack, errTraverse := file.DetectTraversal(pathArg)
 	if traversalAttack || errJoin != nil {
-		log.Errorf("LIST - failed to clean path: %s - error: %s - traversal attack detected: %t - traversal attack detection: %s", pathArg, errJoin, traversalAttack, errTraverse)
+		log.Errorf("ROUTES:ADMIN:Recache - Failed to clean path: %s - error: %s - traversal attack detected: %t - traversal attack detection: %s", pathArg, errJoin, traversalAttack, errTraverse)
 		helpers.Return400Msg("invalid file path", w)
 		return
 	}
@@ -60,7 +60,7 @@ func AdminReCache(w http.ResponseWriter, r *http.Request) {
 	w.Header().Set("Content-Type", "application/json")
 	err = json.NewEncoder(w).Encode(response)
 	if err != nil {
-		log.Errorf("AdminRecache - Failed to serialize JSON: %s", err)
+		log.Errorf("ROUTES:ADMIN:Recache - Failed to serialize JSON: %s", err)
 		return
 	}
 }
@@ -9,7 +9,7 @@ import (
 	"net/http"
 )
 
-func AdminSysInfo(w http.ResponseWriter, r *http.Request) {
+func APIAdminSysInfo(w http.ResponseWriter, r *http.Request) {
 	username, password, ok := r.BasicAuth()
 	if ok {
 		usernameHash := sha256.Sum256([]byte(username))
@@ -7,7 +7,7 @@ import (
 	"net/http"
 )
 
-func HealthCheck(w http.ResponseWriter, r *http.Request) {
+func APIHealthCheck(w http.ResponseWriter, r *http.Request) {
 	response := map[string]interface{}{}
 	response["scanRunning"] = DirectoryCrawler.GetTotalActiveCrawls() > 0
 	response["initialScanRunning"] = cache.InitialCrawlInProgress
@@ -6,7 +6,7 @@ import (
 	"net/http"
 )
 
-func RestrictedDownloadPaths(w http.ResponseWriter, r *http.Request) {
+func APIRestrictedDownloadPaths(w http.ResponseWriter, r *http.Request) {
 	response := config.GetConfig().RestrictedDownloadPaths
 	helpers.WriteJsonResponse(response, false, w, r)
 }
@@ -11,7 +11,7 @@ import (
 	"strings"
 )
 
-func Download(w http.ResponseWriter, r *http.Request) {
+func APIDownload(w http.ResponseWriter, r *http.Request) {
 	if helpers.CheckInitialCrawl() {
 		helpers.HandleRejectDuringInitialCrawl(w)
 		return
@@ -30,7 +30,7 @@ func Download(w http.ResponseWriter, r *http.Request) {
 	cleanPath, errJoin := file.SafeJoin(path)
 	traversalAttack, errTraverse := file.DetectTraversal(path)
 	if traversalAttack || errJoin != nil {
-		log.Errorf("DOWNLOAD - failed to clean path: %s - error: %s - traversal attack detected: %t - traversal attack detection: %s", path, errJoin, traversalAttack, errTraverse)
+		log.Errorf("ROUTES:Download - Failed to clean path: %s - error: %s - traversal attack detected: %t - traversal attack detection: %s", path, errJoin, traversalAttack, errTraverse)
 		helpers.Return400Msg("invalid file path", w)
 		return
 	}
@@ -53,7 +53,7 @@ func Download(w http.ResponseWriter, r *http.Request) {
 	fullPath, errJoin := file.SafeJoin(pathArg)
 	traversalAttack, errTraverse := file.DetectTraversal(pathArg)
 	if traversalAttack || errJoin != nil {
-		log.Errorf("DOWNLOAD - failed to clean path: %s - error: %s - traversal attack detected: %t - traversal attack detection: %s", pathArg, errJoin, traversalAttack, errTraverse)
+		log.Errorf("ROUTES:Download - Failed to clean path: %s - error: %s - traversal attack detected: %t - traversal attack detection: %s", pathArg, errJoin, traversalAttack, errTraverse)
 		helpers.Return400Msg("invalid file path", w)
 		return
 	}
@@ -99,14 +99,14 @@ func Download(w http.ResponseWriter, r *http.Request) {
 	if item.MimeType == nil { // only if the MIME type of this item has not been set yet
 		_, mimeType, _, err = file.GetMimeType(fullPath, true, nil)
 		if err != nil {
-			log.Errorf("Error detecting MIME type: %v", err)
+			log.Errorf("ROUTES:Download - Error detecting MIME type: %v", err)
 		} else if mimeType != "" {
 			// GetMimeType() returns an empty string if it was a directory.
 			// Update the CacheItem's MIME in the sharedCache.
 			item.MimeType = &mimeType
 			SharedCache.Cache.Add(relPath, item)
 		} else {
-			log.Errorf("Download.go failed to match a condition when checking a file's MIME - %s", fullPath)
+			log.Errorf("ROUTES:Download - Failed to match a condition when checking a file's MIME - %s", fullPath)
 			helpers.Return500Msg(w)
 		}
 	} else {
@@ -119,7 +119,7 @@ func Download(w http.ResponseWriter, r *http.Request) {
 	if item.Encoding == nil || *item.Encoding == "" { // only if the encoding of this item has not been set yet
 		encoding, err = file.DetectFileEncoding(fullPath)
 		if err != nil {
-			log.Warnf("Error detecting file encoding: %v", err)
+			log.Warnf("ROUTES:Download - Error detecting file encoding: %v", err)
 		} else {
 			// Update the object in the cache.
 			item.Encoding = &encoding
@@ -155,7 +155,7 @@ func Download(w http.ResponseWriter, r *http.Request) {
 	// Get the file info
 	fileInfo, err := openFile.Stat()
 	if err != nil {
-		log.Errorf(`DOWNLOAD - failed to stat file "%s" - %s`, fullPath, err)
+		log.Errorf(`ROUTES:Download - Failed to stat file "%s" - %s`, fullPath, err)
 		helpers.Return500Msg(w)
 		return
 	}
@@ -10,7 +10,7 @@ import (
 	"strconv"
 )
 
-func ListDir(w http.ResponseWriter, r *http.Request) {
+func APIList(w http.ResponseWriter, r *http.Request) {
 	if helpers.CheckInitialCrawl() {
 		helpers.HandleRejectDuringInitialCrawl(w)
 		return
@@ -38,7 +38,7 @@ func ListDir(w http.ResponseWriter, r *http.Request) {
 	fullPath, errJoin := file.SafeJoin(pathArg)
 	traversalAttack, errTraverse := file.DetectTraversal(pathArg)
 	if traversalAttack || errJoin != nil {
-		log.Errorf("LIST - failed to clean path: %s - error: %s - traversal attack detected: %t - traversal attack detection: %s", pathArg, errJoin, traversalAttack, errTraverse)
+		log.Errorf("ROUTES:List - Failed to clean path: %s - error: %s - traversal attack detected: %t - traversal attack detection: %s", pathArg, errJoin, traversalAttack, errTraverse)
 		helpers.Return400Msg("invalid file path", w)
 		return
 	}
@@ -69,7 +69,7 @@ func ListDir(w http.ResponseWriter, r *http.Request) {
 		helpers.ReturnFake404Msg("file not found", w)
 	}
 	if err != nil {
-		log.Warnf("Error detecting MIME type: %v", err)
+		log.Warnf("ROUTES:List - Error detecting MIME type: %v", err)
 		helpers.Return500Msg(w)
 		return
 	}
@@ -95,7 +95,7 @@ func ListDir(w http.ResponseWriter, r *http.Request) {
 	}
 	paginationLimit, err = strconv.Atoi(r.URL.Query().Get("limit"))
 	if err != nil {
-		log.Errorf("Error parsing limit: %v", err)
+		log.Errorf("ROUTES:List - Error parsing limit: %v", err)
 		helpers.Return400Msg("limit must be a valid integer", w)
 		return
 	}
@@ -21,7 +21,7 @@ import (
 	"strings"
 )
 
-func Thumbnail(w http.ResponseWriter, r *http.Request) {
+func APIThumbnail(w http.ResponseWriter, r *http.Request) {
 	if cache.InitialCrawlInProgress && !config.GetConfig().HttpAllowDuringInitialCrawl {
 		helpers.HandleRejectDuringInitialCrawl(w)
 		returnDummyPNG(w)
@@ -83,7 +83,7 @@ func Thumbnail(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 	if err != nil {
-		log.Errorf("THUMB - error detecting MIME type: %v", err)
+		log.Errorf("ROUTES:Thumb - Error detecting MIME type: %v", err)
 		returnDummyPNG(w)
 		return
 	}
@@ -101,7 +101,7 @@ func Thumbnail(w http.ResponseWriter, r *http.Request) {
 	// Convert the image to a PNG
 	imageBytes, err := file.ConvertToPNG(fullPath, mimeType)
 	if err != nil {
-		log.Warnf("Error converting %s to PNG: %v", fullPath, err)
+		log.Warnf("ROUTES:Thumb - Error converting %s to PNG: %v", fullPath, err)
 		returnDummyPNG(w)
 		return
 	}
@@ -110,7 +110,7 @@ func Thumbnail(w http.ResponseWriter, r *http.Request) {
 	var img image.Image
 	img, err = png.Decode(bytes.NewReader(imageBytes))
 	if err != nil {
-		log.Warnf("Error decoding %s image data: %v", fullPath, err)
+		log.Warnf("ROUTES:Thumb - Error decoding %s image data: %v", fullPath, err)
 		returnDummyPNG(w)
 		return
 	}
@@ -124,7 +124,7 @@ func Thumbnail(w http.ResponseWriter, r *http.Request) {
 
 	buf, err := file.CompressPNGFile(img, pngQuality)
 	if err != nil {
-		log.Warnf("Error compressing %s to PNG: %v", fullPath, err)
+		log.Warnf("ROUTES:Thumb - Error compressing %s to PNG: %v", fullPath, err)
 		returnDummyPNG(w)
 		return
 	}
@@ -167,7 +167,7 @@ func returnDummyPNG(w http.ResponseWriter) {
 	w.Header().Set("Content-Type", "image/png")
 	_, err := w.Write(buffer.Bytes())
 	if err != nil {
-		log.Errorf("THUMBNAIL - Failed to write buffer: %s", err)
+		log.Errorf("ROUTES:Thumb - Failed to write buffer: %s", err)
 		return
 	}
 }
@@ -20,7 +20,7 @@ func init() {
 func InitialCrawl() {
 	log = logging.GetLogger()
 
-	log.Infof("INITIAL CRAWL - starting the crawl for %s", config.GetConfig().RootDir)
+	log.Infof("CRAWLER:Inital - Starting the crawl for %s", config.GetConfig().RootDir)
 
 	ticker := time.NewTicker(3 * time.Second)
 	go logCacheStatus("INITIAL CRAWL", ticker, log.Infof)
@@ -31,7 +31,7 @@ func InitialCrawl() {
 	//start := time.Now()
 	err := dc.Crawl(config.GetConfig().RootDir, nil)
 	if err != nil {
-		log.Errorf("LIST - background recursive crawl failed: %s", err)
+		log.Errorf("CRAWLER:Inital - failed: %s", err)
 	}
 	InitialCrawlInProgress = false
 	InitialCrawlLock.Unlock()
@@ -21,14 +21,14 @@ func InitRecacheSemaphore(limit int) {
 func CheckAndRecache(path string) {
 	item, found := SharedCache.Cache.Get(path)
 	if found && time.Now().UnixNano()/int64(time.Millisecond)-item.Cached > int64(config.GetConfig().CacheTime)*60*1000 {
-		log.Debugf("Re-caching: %s", path)
+		log.Debugf("CACHE:Recache - re-caching: %s", path)
 		sem <- struct{}{} // acquire a token
 		go func() {
			defer func() { <-sem }() // release the token when done
 			dc := DirectoryCrawler.NewDirectoryCrawler()
 			err := dc.Crawl(path, nil)
 			if err != nil {
-				log.Errorf("RECACHE ERROR: %s", err.Error())
+				log.Errorf("CACHE:Recache - %s", err.Error())
 				return
 			}
 		}()
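The `sem <- struct{}{}` / `<-sem` pair above is Go's buffered-channel counting semaphore: the channel's capacity (set in `InitRecacheSemaphore`) bounds how many re-cache crawls run concurrently. A self-contained sketch of the same pattern with an arbitrary capacity of 2:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	// A buffered channel as a counting semaphore: a send acquires a
	// token, a receive releases it, so at most cap(sem) goroutines
	// run the guarded section at once.
	sem := make(chan struct{}, 2)
	var wg sync.WaitGroup

	for i := 0; i < 5; i++ {
		wg.Add(1)
		sem <- struct{}{} // acquire a token
		go func(n int) {
			defer wg.Done()
			defer func() { <-sem }() // release the token when done
			fmt.Println("crawling path", n)
		}(i)
	}
	wg.Wait()
}
```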
@@ -41,7 +41,7 @@ func Recache(path string) error {
 		return errors.New("rejecting crawl, already in progress for this path")
 	}
 
-	log.Debugf("Re-caching: %s", path)
+	log.Debugf("CACHE:Recache - Re-caching: %s", path)
 	start := time.Now()
 	sem <- struct{}{} // acquire a token
 	go func() {
@@ -49,7 +49,7 @@ func Recache(path string) error {
 		dc := DirectoryCrawler.NewDirectoryCrawler()
 		err := dc.Crawl(path, nil)
 		if err != nil {
-			log.Errorf("RECACHE ERROR: %s", err.Error())
+			log.Errorf("CACHE:Recache - %s", err.Error())
 			return
 		}
@@ -69,7 +69,7 @@ func Recache(path string) error {
 		// Update the parent directory's Children field to include the new subdirectory
 		info, err := os.Stat(path)
 		if err != nil {
-			log.Errorf("RECACHE ERROR: %s", err.Error())
+			log.Errorf("CACHE:Recache - %s", err.Error())
 			return
 		} else {
 			newItem := CacheItem.NewItem(path, info)
@@ -88,16 +88,16 @@ func Recache(path string) error {
 				SharedCache.Cache.Add(parentDir, parentItem)
 			}
 		} else if !CacheItem.PathOutsideRoot(parentDir) {
-			// If the parent directory isn't in the cache, crawl it
-			log.Infof("RECACHE - crawling parent directory since it isn't in the cache yet: %s", parentDir)
+			// If the parent directory isn't in the cache, crawl it.
+			log.Infof("CACHE:Recache - Crawling parent directory since it isn't in the cache yet: %s", parentDir)
 			_, err := dc.CrawlNoRecursion(parentDir)
 			if err != nil {
-				log.Errorf("RECACHE ERROR: %s", err.Error())
+				log.Errorf("CACHE:Recache - %s", err.Error())
 				return
 			}
 		}
 		duration := time.Since(start).Round(time.Second)
-		log.Infof("RECACHE - completed in %s - %s", duration, path)
+		log.Infof("CACHE:Recache - Completed in %s - %s", duration, path)
 	}()
 	return nil
 }
@@ -76,14 +76,14 @@ func searchKey(key string, queryString string, excludeElements []string, sem cha
 	dec := gob.NewDecoder(&buf)
 	err := enc.Encode(cacheItem)
 	if err != nil {
-		log.Printf("Error encoding CacheItem: %v", err)
+		log.Printf("CACHE:searchKey - Error encoding CacheItem: %v", err)
 		resultsChan <- nil
 		return
 	}
 	var item CacheItem.Item
 	err = dec.Decode(&item)
 	if err != nil {
-		log.Printf("Error decoding CacheItem: %v", err)
+		log.Printf("CACHE:searchKey - Error decoding CacheItem: %v", err)
 		resultsChan <- nil
 		return
 	}
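The encode/decode round trip above appears to be a gob-based deep copy: the cached item is serialized into a buffer and decoded into a fresh `CacheItem.Item`, so the search works on a snapshot rather than the shared value. A minimal sketch of the technique with a stand-in struct:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// Item stands in for CacheItem.Item; any gob-encodable struct works.
type Item struct {
	Path     string
	Children []string
}

func main() {
	src := Item{Path: "/srv/files", Children: []string{"a.txt"}}

	// Deep copy via gob: encode into a buffer, decode into a new value.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(src); err != nil {
		panic(err)
	}
	var dst Item
	if err := gob.NewDecoder(&buf).Decode(&dst); err != nil {
		panic(err)
	}

	dst.Children[0] = "b.txt" // mutating the copy leaves src untouched
	fmt.Println(src.Children[0], dst.Children[0]) // a.txt b.txt
}
```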
@@ -28,9 +28,9 @@ func worker() {
 		// If a key in Elastic does not exist in the LRU cache, delete it from Elastic.
 		err := deleteFromElasticsearch(job.Key)
 		if err != nil {
-			log.Errorf(`ELASTIC - Error deleting key "%s" - %s`, job.Key, err)
+			log.Errorf(`ELASTIC:Delete - Error deleting key "%s" - %s`, job.Key, err)
 		} else {
-			log.Debugf(`ELASTIC - Deleted path: "%s"`, job.Path)
+			log.Debugf(`ELASTIC:Delete - Deleted path: "%s"`, job.Path)
 		}
 	}
@@ -27,7 +27,7 @@ func SyncThread() {
 	ticker := time.NewTicker(time.Duration(config.GetConfig().ElasticsearchSyncInterval) * time.Second)
 	fullSyncTicker := time.NewTicker(time.Duration(config.GetConfig().ElasticsearchFullSyncInterval) * time.Second)
 
-	log.Debugf("ELASTIC - started sync timers. Refresh: %d sec. Full: %d sec.", config.GetConfig().ElasticsearchSyncInterval, config.GetConfig().ElasticsearchFullSyncInterval)
+	log.Debugf("ELASTIC - Started sync timers. Refresh: %d sec. Full: %d sec.", config.GetConfig().ElasticsearchSyncInterval, config.GetConfig().ElasticsearchFullSyncInterval)
 
 	for {
 		select {
@@ -42,7 +42,7 @@ func SyncThread() {
 // TODO: have the workers exit when the sync job is finished
 func syncElasticsearch(doFullSync bool) {
 	if !ElasticEnabled {
-		log.Debugln("ELASTIC - disabled, not syncing.")
+		log.Debugln("ELASTIC - Disabled, not syncing.")
 		return
 	}
@@ -53,13 +53,13 @@ func syncElasticsearch(doFullSync bool) {
 	var syncType string
 	if doFullSync {
 		if !FullSyncRunning.TryAcquire(1) {
-			log.Fatalln("ELASTIC - failed to acquire the FullSyncRunning semaphore. This is a logic error.")
+			log.Fatalln("ELASTIC - Failed to acquire the FullSyncRunning semaphore. This is a logic error.")
 		}
 		defer FullSyncRunning.Release(1)
 		syncType = "full refresh"
 	} else {
 		if !RefreshSyncRunning.TryAcquire(1) {
-			log.Fatalln("ELASTIC - failed to acquire the RefreshSyncRunning semaphore. This is a logic error.")
+			log.Fatalln("ELASTIC - Failed to acquire the RefreshSyncRunning semaphore. This is a logic error.")
 		}
 		defer RefreshSyncRunning.Release(1)
 		syncType = "refresh"
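`FullSyncRunning` and `RefreshSyncRunning` are used like weighted semaphores, e.g. `golang.org/x/sync/semaphore`: `TryAcquire(1)` fails immediately instead of blocking when a sync of that type already holds the slot. A minimal sketch, assuming that package (the variable name mirrors the diff):

```go
package main

import (
	"fmt"

	"golang.org/x/sync/semaphore"
)

// Assumed to match the diff's usage: a one-slot semaphore per sync type.
var FullSyncRunning = semaphore.NewWeighted(1)

func main() {
	// TryAcquire returns false right away if the slot is taken,
	// unlike Acquire, which would block until it frees up.
	if !FullSyncRunning.TryAcquire(1) {
		fmt.Println("a full sync is already running")
		return
	}
	defer FullSyncRunning.Release(1)
	fmt.Println("running the full sync")
}
```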
@@ -69,7 +69,7 @@ func syncElasticsearch(doFullSync bool) {
 	defer func() { fullSync = false }()
 	fullSync = doFullSync
 
-	log.Infof("ELASTIC - started a %s sync.", syncType)
+	log.Infof("ELASTIC - Started a %s sync.", syncType)
 	start := time.Now()
 
 	var err error
@@ -84,7 +84,7 @@ func syncElasticsearch(doFullSync bool) {
 	dc := DirectoryCrawler.NewDirectoryCrawler()
 	err = dc.Crawl(config.GetConfig().RootDir, addToElasticsearch)
 	if err != nil {
-		log.Errorf("ELASTIC - crawl failed: %s", err)
+		log.Errorf("ELASTIC - Crawl failed: %s", err)
 		return
 	}
@@ -110,5 +110,5 @@ func EnableElasticsearchConnection() {
 
 func LogElasticQuit() {
 	ElasticEnabled = false
-	log.Errorln("ELASTIC - background thread exiting, Elastic indexing and search will not be available.")
+	log.Errorln("ELASTIC - Background thread exiting, Elastic indexing and search will not be available.")
 }
@@ -27,11 +27,11 @@ func addToElasticsearch(fullPath string, info os.FileInfo, incomingErr error) er
 	cacheItem, found := SharedCache.Cache.Get(relPath)
 	if !found {
 		// This sort of thing can happen if new files have been added on disk but a scan has not been run to refresh the cache.
-		log.Debugf(`ELASTICSEARCH - path "%s" exists on disk, but not in the LRU cache. Deleting from Elastic.`, relPath)
+		log.Debugf(`ELASTIC:Add - Path "%s" exists on disk, but not in the LRU cache. Deleting from Elastic.`, relPath)
 		// Delete this item from Elastic in order to avoid any strange inconsistencies.
 		err := deleteFromElasticsearch(encodeToBase64(relPath))
 		if err != nil {
-			log.Errorf("ELASTIC - failed to delete \"%s\" - %s", relPath, err)
+			log.Errorf("ELASTIC:Add - Failed to delete \"%s\" - %s", relPath, err)
 		}
 	} else {
 		if _, ok := globalPathsByKey[relPath]; ok {
@@ -50,13 +50,13 @@ func addToElasticsearch(fullPath string, info os.FileInfo, incomingErr error) er
 func preformAddToElasticsearch(item *CacheItem.Item) {
 	preparedItem, err := prepareCacheItem(item)
 	if err != nil {
-		log.Printf("ELASTIC - Error preparing new item: %s", err)
+		log.Printf("ELASTIC:Add - Error preparing new item: %s", err)
 		return
 	}
 
 	data, err := json.Marshal(preparedItem)
 	if err != nil {
-		log.Printf("ELASTIC - Error marshaling new item: %s", err)
+		log.Printf("ELASTIC:Add - Error marshaling new item: %s", err)
 		return
 	}
@@ -68,7 +68,7 @@ func preformAddToElasticsearch(item *CacheItem.Item) {
 	}
 	res, err := req.Do(context.Background(), ElasticClient)
 	if err != nil {
-		log.Errorf("ELASTIC - Error getting response: %s", err)
+		log.Errorf("ELASTIC:Add - Error getting response: %s", err)
 		return
 	}
 	defer res.Body.Close()
@@ -76,12 +76,12 @@ func preformAddToElasticsearch(item *CacheItem.Item) {
 	if res.IsError() {
 		var e map[string]interface{}
 		if err := json.NewDecoder(res.Body).Decode(&e); err != nil {
-			log.Printf("Error parsing the response body: %s", err)
+			log.Errorf("ELASTIC:Add - Error parsing the response body: %s", err)
 		}
-		log.Errorf(`ELASTIC - Error indexing document "%s" - Status code: %d - %s`, item.Path, res.StatusCode, e)
+		log.Errorf(`ELASTIC:Add - Error indexing document "%s" - Status code: %d - %s`, item.Path, res.StatusCode, e)
 	}
 
-	log.Debugf(`ELASTIC - Added: "%s"`, preparedItem.Path)
+	log.Debugf(`ELASTIC:Add - Added: "%s"`, preparedItem.Path)
 }
 
 // prepareCacheItem is used to get an item ready to insert into Elastic.
@@ -10,7 +10,7 @@ import (
 )
 
 func startRemoveStaleItemsFromElasticsearch(pathsByKey map[string]string) {
-	log.Debugln("ELASTIC - Checking for deleted items that need to be removed from Elastic...")
+	log.Debugln("ELASTIC:Delete - Checking for deleted items that need to be removed from Elastic...")
 
 	// TODO: use waitgroups here so we know when all the jobs are done and we can erase globalKeysByPath and globalPathsByKey
@@ -8,7 +8,7 @@ func createCrazyfsIndex() {
 	// Check if index exists
 	res, err := ElasticClient.Indices.Exists([]string{config.GetConfig().ElasticsearchIndex})
 	if err != nil {
-		log.Fatalf("Error checking if index exists: %s", err)
+		log.Fatalf("ELASTIC - Error checking if index exists: %s", err)
 	}
 	defer res.Body.Close()
@@ -18,14 +18,14 @@ func createCrazyfsIndex() {
 	} else if res.StatusCode == 404 {
 		res, err = ElasticClient.Indices.Create(config.GetConfig().ElasticsearchIndex)
 		if err != nil {
-			log.Fatalf("Error creating index: %s", err)
+			log.Fatalf("ELASTIC - Error creating index: %s", err)
 		}
 		defer res.Body.Close()
 
 		if res.IsError() {
-			log.Printf("Error creating index: %s", res.String())
+			log.Fatalf("ELASTIC - Elasticsearch returned error when trying to create index: %s", res.String())
 		}
 
-		log.Infof(`Created a new index named "%s"`, config.GetConfig().ElasticsearchIndex)
+		log.Infof(`ELASTIC - Created a new index named "%s"`, config.GetConfig().ElasticsearchIndex)
 	}
 }
@@ -49,7 +49,7 @@ func GetMimeType(path string, analyze bool, passedInfo *os.FileInfo) (bool, stri
 	if analyze {
 		MIME, err = mimetype.DetectFile(path)
 		if err != nil {
-			log.Errorf("Error analyzing MIME type: %v", err)
+			log.Errorf("FILE:GetMimeType - Error analyzing MIME type: %v", err)
 			return false, "", "", err
 		}
 		mimeType = MIME.String()