reorganize admin crawl info json, clarify debug message

Cyberes 2024-01-23 15:11:15 -07:00
parent ba2747c186
commit 665a2e8c18
3 changed files with 6 additions and 6 deletions

@@ -52,9 +52,7 @@ func AdminCrawlsInfo(w http.ResponseWriter, r *http.Request) {
 		"deleteWorkers": map[string]interface{}{
 			"busy": elastic.BusyWorkers,
 			"alive": config.GetConfig().ElasticsearchSyncThreads,
-		},
-		"queue": map[string]interface{}{
-			"size": elastic.Queue.GetQueueSize(),
+			"queueSize": elastic.Queue.GetQueueSize(),
 		},
 		"syncRunning": map[string]interface{}{
 			"refresh": refreshSyncRunning,

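For context, a minimal standalone sketch of the payload shape this hunk appears to produce — folding "queueSize" into "deleteWorkers" is inferred from the hunk line counts, and the literal values are hypothetical stand-ins for elastic.BusyWorkers, config.GetConfig().ElasticsearchSyncThreads, elastic.Queue.GetQueueSize(), and refreshSyncRunning:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical stand-in values; the real handler reads these from the
	// elastic and config packages at request time.
	response := map[string]interface{}{
		"deleteWorkers": map[string]interface{}{
			"busy":      2,
			"alive":     4,
			"queueSize": 17,
		},
		"syncRunning": map[string]interface{}{
			"refresh": false,
		},
	}
	out, err := json.MarshalIndent(response, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}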

@@ -26,7 +26,8 @@ func addToElasticsearch(fullPath string, info os.FileInfo, incomingErr error) error {
 	if !shouldExclude(relPath, config.GetConfig().ElasticsearchExcludePatterns) {
 		cacheItem, found := SharedCache.Cache.Get(relPath)
 		if !found {
-			log.Warnf(`ELASTICSEARCH - Could not fetch item "%s" from the LRU cache! Deleting this item from Elastic. This error can probably be ignored.`, relPath)
+			log.Debugf(`ELASTICSEARCH - path "%s" exists on disk, but not in the LRU cache. Deleting from Elastic.`, relPath)
+			// Delete this item from Elastic in order to avoid any strange inconsistencies.
 			err := deleteFromElasticsearch(encodeToBase64(relPath))
 			if err != nil {
 				log.Errorf("ELASTIC - failed to delete \"%s\" - %s", relPath, err)

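To illustrate the changed path, a minimal standalone sketch of the cache-miss branch — the map-based cache here and the bodies of encodeToBase64 and deleteFromElasticsearch are hypothetical stand-ins for the repo's SharedCache and Elastic helpers:

package main

import (
	"encoding/base64"
	"log"
)

// Stand-in for SharedCache: only paths that were crawled appear here.
var cache = map[string]struct{}{"docs/readme.md": {}}

func encodeToBase64(s string) string {
	return base64.StdEncoding.EncodeToString([]byte(s))
}

// Stand-in for the real Elastic delete helper.
func deleteFromElasticsearch(id string) error {
	log.Printf("would delete Elastic document %s", id)
	return nil
}

func main() {
	relPath := "docs/orphan.txt" // on disk, but never entered the cache
	if _, found := cache[relPath]; !found {
		// A cache miss here is expected rather than an error, hence the debug
		// level: the file exists on disk but was never crawled, so its Elastic
		// document is deleted to keep the index consistent.
		log.Printf(`ELASTICSEARCH - path %q exists on disk, but not in the LRU cache. Deleting from Elastic.`, relPath)
		if err := deleteFromElasticsearch(encodeToBase64(relPath)); err != nil {
			log.Printf(`ELASTIC - failed to delete %q - %s`, relPath, err)
		}
	}
}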

@@ -1,3 +1,4 @@
- Use the Elastic delete pool to add new items to the database. Don't use the main pool because that slows everything down when a file crawl and Elastic are running at the same time
- on the client health page, show the time the initial crawl started