Adjust JSON response, minor cleanup
parent fdd7f08b12
commit 933670c63a
@@ -152,7 +152,7 @@ func APISearch(w http.ResponseWriter, r *http.Request) {
 	}

 	searchDuration := time.Since(searchStart) // .Round(time.Second)
-	log.Debugf(`ROUTES:Search - %s - Query: "%s" - Results: %d - Elapsed: %d`, logging.GetRealIP(r), queryString, len(results), searchDuration)
+	log.Debugf(`ROUTES:Search - %s - Query: "%s" - Results: %d - Elapsed: %s`, logging.GetRealIP(r), queryString, len(results), searchDuration)

 	response := map[string]interface{}{
 		"results": results,
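Note: the only change in this hunk is the format verb for `searchDuration`. Since `time.Since` returns a `time.Duration`, `%d` logs the raw nanosecond count, while `%s` goes through `Duration.String()` and prints a human-readable value. A standalone sketch of the difference (not part of the commit):

package main

import (
	"fmt"
	"time"
)

func main() {
	d := 1500 * time.Millisecond
	fmt.Printf("%d\n", d) // 1500000000 -- raw nanoseconds, hard to read in a log line
	fmt.Printf("%s\n", d) // 1.5s -- Duration.String(), which is what the new %s verb uses
}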
@@ -37,19 +37,20 @@ func APIAdminCrawlsInfo(w http.ResponseWriter, r *http.Request) {
 	}

 	var elasticWorkers map[string]interface{}
+	var elasticQueueSize int
 	if (refreshSyncRunning || fullSyncRunning) && globals.ElasticCrawlers != nil {
 		// If no sync is running then these vars will not exist.
 		elasticWorkers = map[string]interface{}{
 			"busy": globals.ElasticCrawlers.BusyWorkers,
 			"alive": config.GetConfig().ElasticsearchSyncWorkers,
-			"queueSize": globals.ElasticCrawlers.Queue.GetQueuedJobs(),
 		}
+		elasticQueueSize = globals.ElasticCrawlers.Queue.GetQueuedJobs()
 	} else {
 		elasticWorkers = map[string]interface{}{
 			"busy": 0,
 			"alive": 0,
-			"queueSize": 0,
 		}
+		elasticQueueSize = 0
 	}

 	response := map[string]interface{}{
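Note: declaring `var elasticQueueSize int` ahead of the branch keeps it in scope for the response built below, and its zero value is already 0, so the explicit `elasticQueueSize = 0` in the else branch is belt-and-braces rather than strictly required. A minimal standalone illustration (names are placeholders, not the project's code):

package main

import "fmt"

func main() {
	var queueSize int // zero value is 0
	syncRunning := false
	if syncRunning {
		queueSize = 42
	}
	fmt.Println(queueSize) // prints 0 when no sync is running, even without an explicit else
}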
@@ -66,7 +67,8 @@ func APIAdminCrawlsInfo(w http.ResponseWriter, r *http.Request) {
 			"initialCrawlElapsed": config.InitialCrawlElapsed,
 		},
 		"elastic": map[string]interface{}{
 			"workers": elasticWorkers,
+			"queueSize": elasticQueueSize,
 			"syncRunning": map[string]interface{}{
 				"refresh": refreshSyncRunning,
 				"full": fullSyncRunning,
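Note: together with the previous hunk, this moves `queueSize` from inside the `workers` map up to a sibling key on the `elastic` object. A self-contained sketch of roughly what the reshaped section marshals to (values are placeholders; only keys visible in this diff are shown):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	elastic := map[string]interface{}{
		"workers": map[string]interface{}{
			"busy":  1,
			"alive": 2,
		},
		"queueSize": 17, // now a sibling of "workers" rather than nested inside it
		"syncRunning": map[string]interface{}{
			"refresh": true,
			"full":    false,
		},
	}
	b, _ := json.MarshalIndent(elastic, "", "  ")
	fmt.Println(string(b)) // keys print alphabetically: queueSize, syncRunning, workers
}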
@@ -116,7 +116,6 @@ func main() {
 	// Start the Elastic connection, so it can initialize while we're doing the initial crawl.
 	// If we fail to establish a connection to Elastic, don't kill the entire server. Instead, just disable Elastic.
 	if cfg.ElasticsearchEnable && !cliArgs.disableElasticSync {
-		fmt.Println(config.GetConfig().ElasticsearchSyncWorkers + 1)
 		esCfg := elasticsearch.Config{
 			Addresses: []string{
 				cfg.ElasticsearchEndpoint,
@@ -16,6 +16,7 @@ import (
 	"sync"
 )

+// TODO: is there a better way to store this data? It appears to eat up a lot of memory.
 // existingKeys is a global variable called by the Walker callback: addToElasticsearch().
 // It is set only by syncElasticsearch() when a sync is started. Only one sync can run at a time.
 // A global is needed since there is no way to pass variables like this to the workers.
@@ -10,14 +10,6 @@ import (
 	"time"
 )

-func getElasticSize() (int, error) {
-	keysByPath, _, err := getPathsFromIndex(true, 100)
-	if err != nil {
-		return -1, err
-	}
-	return len(keysByPath), nil
-}
-
 func getPathsFromIndex(doScroll bool, withSize int) (map[string]string, map[string]string, error) {
 	// This may take a bit if the index is very large, so avoid calling this.

@@ -33,23 +33,17 @@ func GetRealIP(r *http.Request) string {
 		// Or just use the header the user specified.
 		forwarded = r.Header.Get(config.GetConfig().HTTPRealIPHeader)
 	}
-
 	return ip
 }

 func LogRequest(handler http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		sw := statusWriter{ResponseWriter: w, status: http.StatusOK} // set default status
-
-		start := time.Now() // record the start time
+		start := time.Now()
 		handler.ServeHTTP(&sw, r)
-		end := time.Now() // record the end time
-
-		// calculate the duration
+		end := time.Now()
 		duration := end.Sub(start)
-
 		ip := GetRealIP(r)
-
 		log.Infof("HTTP - %s %d %s from %s took %v", r.Method, sw.status, r.URL.RequestURI(), ip, duration)
 	})
 }
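Note: `LogRequest` is standard `net/http` middleware; this hunk only trims redundant comments and blank lines around the timing calls. For readers unfamiliar with the pattern, a minimal self-contained version is sketched below (illustrative names only; it omits the project's `statusWriter`, which is what captures `sw.status` for the log line):

package main

import (
	"log"
	"net/http"
	"time"
)

// logRequests wraps a handler, times each request, and logs it afterwards.
func logRequests(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		next.ServeHTTP(w, r)
		log.Printf("HTTP - %s %s took %v", r.Method, r.URL.RequestURI(), time.Since(start))
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	log.Fatal(http.ListenAndServe(":8080", logRequests(mux)))
}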
@@ -45,12 +45,12 @@ func NewResponseItem(cacheItem *cacheitem.Item) *ResponseItem {
 		IsDir: cacheItem.IsDir,
 		IsSymlink: cacheItem.IsSymlink,
 		Cached: cacheItem.Cached,
-		Children: make([]*ResponseItem, len(cacheItem.Children)),
+		Children: make([]*ResponseItem, len(cacheItem.Children)), // avoid a null entry for the children key in the JSON.
 		MimeType: cacheItem.MimeType,
 	}

 	// Grab the children from the cache and add them to newResponseItem.
-	if len(cacheItem.Children) > 0 { // avoid a null entry for the children key in the JSON.
+	if len(cacheItem.Children) > 0 {
 		var children []*ResponseItem
 		for _, child := range cacheItem.Children {
 			childItem, found := sharedcache.Cache.Get(child)
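Note: both edits in this hunk relate to how Go slices marshal to JSON. A nil slice encodes as null, while a slice created with make (even at length 0) encodes as [], so preallocating `Children` in the struct literal is what keeps the children key from being null; the comment now sits on the line that actually does that work. A quick standalone demonstration:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var nilChildren []string           // nil slice
	emptyChildren := make([]string, 0) // empty but non-nil, like make(..., len(children)) with no children

	a, _ := json.Marshal(map[string]interface{}{"children": nilChildren})
	b, _ := json.Marshal(map[string]interface{}{"children": emptyChildren})

	fmt.Println(string(a)) // {"children":null}
	fmt.Println(string(b)) // {"children":[]}
}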