-
-
Notifications
You must be signed in to change notification settings - Fork 34
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #167 from Notifiarr/dn2_dashboard
Fixes Aplenty
- Loading branch information
Showing
17 changed files
with
446 additions
and
255 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,117 @@ | ||
//nolint:dupl | ||
package client | ||
|
||
import ( | ||
"context" | ||
"encoding/json" | ||
"fmt" | ||
"net/http" | ||
"sync" | ||
|
||
"github.com/Notifiarr/notifiarr/pkg/apps" | ||
"github.com/Notifiarr/notifiarr/pkg/notifiarr" | ||
"github.com/gorilla/mux" | ||
) | ||
|
||
/* The site relies on release and quality profiles data from Radarr and Sonarr. | ||
* If someone has several instances, it causes slow page load times. | ||
* So we made this file to aggregate responses from each of the app types. | ||
*/ | ||
|
||
func (c *Client) aggregateTrash(req *http.Request) (int, interface{}) { | ||
var wait sync.WaitGroup | ||
defer wait.Wait() | ||
|
||
var input struct { | ||
Radarr struct { // used for "all" | ||
Instances notifiarr.IntList `json:"instances"` | ||
} `json:"radarr"` | ||
Sonarr struct { // used for "all" | ||
Instances notifiarr.IntList `json:"instances"` | ||
} `json:"sonarr"` | ||
Instances notifiarr.IntList `json:"instances"` | ||
} | ||
// Extract POST payload. | ||
err := json.NewDecoder(req.Body).Decode(&input) | ||
|
||
switch app := mux.Vars(req)["app"]; { | ||
default: | ||
return http.StatusBadRequest, fmt.Errorf("%w: %s", apps.ErrInvalidApp, app) | ||
case err != nil: | ||
return http.StatusBadRequest, fmt.Errorf("decoding POST payload: (app: %s) %w", app, err) | ||
case app == "sonarr": | ||
return http.StatusOK, c.aggregateTrashSonarr(req.Context(), &wait, input.Instances) | ||
case app == "radarr": | ||
return http.StatusOK, c.aggregateTrashRadarr(req.Context(), &wait, input.Instances) | ||
case app == "all": | ||
return http.StatusOK, map[string]interface{}{ | ||
"radarr": c.aggregateTrashRadarr(req.Context(), &wait, input.Radarr.Instances), | ||
"sonarr": c.aggregateTrashSonarr(req.Context(), &wait, input.Sonarr.Instances), | ||
} | ||
} | ||
} | ||
|
||
func (c *Client) aggregateTrashSonarr(ctx context.Context, wait *sync.WaitGroup, | ||
instances notifiarr.IntList) []*notifiarr.SonarrTrashPayload { | ||
output := []*notifiarr.SonarrTrashPayload{} | ||
// Create our known+requested instances, so we can write slice values in go routines. | ||
for i, app := range c.Config.Apps.Sonarr { | ||
if instance := i + 1; instances.Has(instance) { | ||
output = append(output, ¬ifiarr.SonarrTrashPayload{Instance: instance, Name: app.Name}) | ||
} | ||
} | ||
|
||
var err error | ||
// Grab data for each requested instance in parallel/go routine. | ||
for idx := range output { | ||
wait.Add(1) | ||
|
||
go func(idx, instance int) { | ||
defer wait.Done() | ||
// Add the profiles, and/or error into our data structure/output data. | ||
app := c.Config.Apps.Sonarr[instance-1] | ||
if output[idx].QualityProfiles, err = app.GetQualityProfilesContext(ctx); err != nil { | ||
output[idx].Error = fmt.Sprintf("getting quality profiles: %v", err) | ||
c.Errorf("Handling Sonarr API request (%d): %s", instance, output[idx].Error) | ||
} else if output[idx].ReleaseProfiles, err = app.GetReleaseProfilesContext(ctx); err != nil { | ||
output[idx].Error = fmt.Sprintf("getting release profiles: %v", err) | ||
c.Errorf("Handling Sonarr API request (%d): %s", instance, output[idx].Error) | ||
} | ||
}(idx, output[idx].Instance) | ||
} | ||
|
||
return output | ||
} | ||
|
||
// This is basically a duplicate of the above code. | ||
func (c *Client) aggregateTrashRadarr(ctx context.Context, wait *sync.WaitGroup, | ||
instances notifiarr.IntList) []*notifiarr.RadarrTrashPayload { | ||
output := []*notifiarr.RadarrTrashPayload{} | ||
// Create our known+requested instances, so we can write slice values in go routines. | ||
for i, app := range c.Config.Apps.Radarr { | ||
if instance := i + 1; instances.Has(instance) { | ||
output = append(output, ¬ifiarr.RadarrTrashPayload{Instance: instance, Name: app.Name}) | ||
} | ||
} | ||
|
||
var err error | ||
// Grab data for each requested instance in parallel/go routine. | ||
for idx := range output { | ||
wait.Add(1) | ||
|
||
go func(idx, instance int) { | ||
defer wait.Done() | ||
// Add the profiles, and/or error into our data structure/output data. | ||
app := c.Config.Apps.Radarr[instance-1] | ||
if output[idx].QualityProfiles, err = app.GetQualityProfilesContext(ctx); err != nil { | ||
output[idx].Error = fmt.Sprintf("getting quality profiles: %v", err) | ||
c.Errorf("Handling Radarr API request (%d): %s", instance, output[idx].Error) | ||
} else if output[idx].CustomFormats, err = app.GetCustomFormatsContext(ctx); err != nil { | ||
output[idx].Error = fmt.Sprintf("getting custom formats: %v", err) | ||
c.Errorf("Handling Radarr API request (%d): %s", instance, output[idx].Error) | ||
} | ||
}(idx, output[idx].Instance) | ||
} | ||
|
||
return output | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,162 @@ | ||
package client | ||
|
||
import ( | ||
"context" | ||
"net/http" | ||
"sync" | ||
|
||
"github.com/Notifiarr/notifiarr/pkg/apps" | ||
"github.com/Notifiarr/notifiarr/pkg/plex" | ||
) | ||
|
||
/* The version handler gets the version from a bunch of apps and returns them. */ | ||
|
||
// conTest is one app instance's connectivity result reported by /api/version:
// the 1-based instance number, whether the status call succeeded (Up), and the
// raw system-status payload returned by the app (omitted from JSON when empty).
type conTest struct {
	Instance int         `json:"instance"`
	Up       bool        `json:"up"`
	Status   interface{} `json:"systemStatus,omitempty"`
}
|
||
// versionHandler returns application run and build time data and application statuses: /api/version. | ||
func (c *Client) versionHandler(r *http.Request) (int, interface{}) { | ||
output := c.website.Info() | ||
output["appsStatus"] = c.appStatsForVersion(r.Context()) | ||
|
||
if host, err := c.website.GetHostInfoUID(); err != nil { | ||
output["hostError"] = err.Error() | ||
} else { | ||
output["host"] = host | ||
} | ||
|
||
return http.StatusOK, output | ||
} | ||
|
||
// appStatsForVersion loops each app and gets the version info. | ||
func (c *Client) appStatsForVersion(ctx context.Context) map[string]interface{} { | ||
var ( | ||
lid = make([]*conTest, len(c.Config.Apps.Lidarr)) | ||
prl = make([]*conTest, len(c.Config.Apps.Prowlarr)) | ||
rad = make([]*conTest, len(c.Config.Apps.Radarr)) | ||
read = make([]*conTest, len(c.Config.Apps.Readarr)) | ||
son = make([]*conTest, len(c.Config.Apps.Sonarr)) | ||
plx = []*conTest{} | ||
wg sync.WaitGroup | ||
) | ||
|
||
getPlexVersion(ctx, &wg, c.Config.Plex, &plx) | ||
getLidarrVersion(ctx, &wg, c.Config.Apps.Lidarr, lid) | ||
getProwlarrVersion(ctx, &wg, c.Config.Apps.Prowlarr, prl) | ||
getRadarrVersion(ctx, &wg, c.Config.Apps.Radarr, rad) | ||
getReadarrVersion(ctx, &wg, c.Config.Apps.Readarr, read) | ||
getSonarrVersion(ctx, &wg, c.Config.Apps.Sonarr, son) | ||
wg.Wait() | ||
|
||
return map[string]interface{}{ | ||
"lidarr": lid, | ||
"radarr": rad, | ||
"readarr": read, | ||
"sonarr": son, | ||
"prowlarr": prl, | ||
"plex": plx, | ||
} | ||
} | ||
|
||
func getLidarrVersion(ctx context.Context, wait *sync.WaitGroup, lidarrs []*apps.LidarrConfig, lid []*conTest) { | ||
for idx, app := range lidarrs { | ||
wait.Add(1) | ||
|
||
go func(idx int, app *apps.LidarrConfig) { | ||
defer wait.Done() | ||
|
||
stat, err := app.GetSystemStatusContext(ctx) | ||
lid[idx] = &conTest{Instance: idx + 1, Up: err == nil, Status: stat} | ||
}(idx, app) | ||
} | ||
} | ||
|
||
func getProwlarrVersion(ctx context.Context, wait *sync.WaitGroup, prowlarrs []*apps.ProwlarrConfig, prl []*conTest) { | ||
for idx, app := range prowlarrs { | ||
wait.Add(1) | ||
|
||
go func(idx int, app *apps.ProwlarrConfig) { | ||
defer wait.Done() | ||
|
||
stat, err := app.GetSystemStatusContext(ctx) | ||
prl[idx] = &conTest{Instance: idx + 1, Up: err == nil, Status: stat} | ||
}(idx, app) | ||
} | ||
} | ||
|
||
func getRadarrVersion(ctx context.Context, wait *sync.WaitGroup, radarrs []*apps.RadarrConfig, rad []*conTest) { | ||
for idx, app := range radarrs { | ||
wait.Add(1) | ||
|
||
go func(idx int, app *apps.RadarrConfig) { | ||
defer wait.Done() | ||
|
||
stat, err := app.GetSystemStatusContext(ctx) | ||
rad[idx] = &conTest{Instance: idx + 1, Up: err == nil, Status: stat} | ||
}(idx, app) | ||
} | ||
} | ||
|
||
func getReadarrVersion(ctx context.Context, wait *sync.WaitGroup, readarrs []*apps.ReadarrConfig, read []*conTest) { | ||
for idx, app := range readarrs { | ||
wait.Add(1) | ||
|
||
go func(idx int, app *apps.ReadarrConfig) { | ||
defer wait.Done() | ||
|
||
stat, err := app.GetSystemStatusContext(ctx) | ||
read[idx] = &conTest{Instance: idx + 1, Up: err == nil, Status: stat} | ||
}(idx, app) | ||
} | ||
} | ||
|
||
func getSonarrVersion(ctx context.Context, wait *sync.WaitGroup, sonarrs []*apps.SonarrConfig, son []*conTest) { | ||
for idx, app := range sonarrs { | ||
wait.Add(1) | ||
|
||
go func(idx int, app *apps.SonarrConfig) { | ||
defer wait.Done() | ||
|
||
stat, err := app.GetSystemStatusContext(ctx) | ||
son[idx] = &conTest{Instance: idx + 1, Up: err == nil, Status: stat} | ||
}(idx, app) | ||
} | ||
} | ||
|
||
func getPlexVersion(ctx context.Context, wait *sync.WaitGroup, plexServer *plex.Server, plx *[]*conTest) { | ||
if !plexServer.Configured() { | ||
return | ||
} | ||
|
||
wait.Add(1) | ||
|
||
go func() { | ||
defer wait.Done() | ||
|
||
stat, err := plexServer.GetInfo(ctx) | ||
if stat == nil { | ||
stat = &plex.PMSInfo{} | ||
} | ||
|
||
*plx = []*conTest{{ | ||
Instance: 1, | ||
Up: err == nil, | ||
Status: map[string]interface{}{ | ||
"friendlyName": stat.FriendlyName, | ||
"version": stat.Version, | ||
"updatedAt": stat.UpdatedAt, | ||
"platform": stat.Platform, | ||
"platformVersion": stat.PlatformVersion, | ||
"size": stat.Size, | ||
"myPlexSigninState": stat.MyPlexSigninState, | ||
"myPlexSubscription": stat.MyPlexSubscription, | ||
"pushNotifications": stat.PushNotifications, | ||
"streamingBrainVersion": stat.StreamingBrainVersion, | ||
"streamingBrainABRVersion": stat.StreamingBrainABRVersion, | ||
}, | ||
}} | ||
}() | ||
} |
Oops, something went wrong.