Skip to content

Commit

Permalink
Merge pull request #192 from ionite34/fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
ionite34 authored Aug 10, 2023
2 parents 88ec80c + 9ea7cbd commit bec5103
Show file tree
Hide file tree
Showing 3 changed files with 69 additions and 22 deletions.
5 changes: 5 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,11 @@ All notable changes to Stability Matrix will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning 2.0](https://semver.org/spec/v2.0.0.html).

## v2.1.2

### Changed
- SD.Next install now uses ROCm PyTorch backend on Linux AMD GPU machines for better performance over DirectML

## v2.1.1

### Added
Expand Down
52 changes: 30 additions & 22 deletions StabilityMatrix.Core/Models/Packages/VladAutomatic.cs
Original file line number Diff line number Diff line change
Expand Up @@ -98,19 +98,21 @@ public VladAutomatic(IGithubApiCache githubApi, ISettingsManager settingsManager
{
Name = "Use DirectML if no compatible GPU is detected",
Type = LaunchOptionType.Bool,
InitialValue = !HardwareHelper.HasNvidiaGpu() && HardwareHelper.HasAmdGpu(),
InitialValue = PreferDirectML(),
Options = new() { "--use-directml" }
},
new()
{
Name = "Force use of Nvidia CUDA backend",
Type = LaunchOptionType.Bool,
InitialValue = HardwareHelper.HasNvidiaGpu(),
Options = new() { "--use-cuda" }
},
new()
{
Name = "Force use of AMD ROCm backend",
Type = LaunchOptionType.Bool,
InitialValue = PreferRocm(),
Options = new() { "--use-rocm" }
},
new()
Expand All @@ -136,6 +138,16 @@ public VladAutomatic(IGithubApiCache githubApi, ISettingsManager settingsManager

public override string ExtraLaunchArguments => "";

/// <summary>
/// Whether ROCm should be the default backend: Linux host with an AMD GPU
/// and no Nvidia GPU present.
/// </summary>
private static bool PreferRocm()
{
    return !HardwareHelper.HasNvidiaGpu()
        && HardwareHelper.HasAmdGpu()
        && Compat.IsLinux;
}

/// <summary>
/// Whether DirectML should be the default backend: Windows host with an AMD GPU
/// and no Nvidia GPU present.
/// </summary>
private static bool PreferDirectML()
{
    return !HardwareHelper.HasNvidiaGpu()
        && HardwareHelper.HasAmdGpu()
        && Compat.IsWindows;
}

public override Task<string> GetLatestVersion() => Task.FromResult("master");

public override async Task<IEnumerable<PackageVersion>> GetAllVersions(bool isReleaseMode = true)
Expand All @@ -150,42 +162,38 @@ public override async Task<IEnumerable<PackageVersion>> GetAllVersions(bool isRe

public override async Task InstallPackage(IProgress<ProgressReport>? progress = null)
{
progress?.Report(new ProgressReport(-1f, "Installing dependencies...", isIndeterminate: true));
progress?.Report(new ProgressReport(-1f, "Installing package...", isIndeterminate: true));
// Setup venv
var venvRunner = new PyVenvRunner(Path.Combine(InstallLocation, "venv"));
venvRunner.WorkingDirectory = InstallLocation;
if (!venvRunner.Exists())
venvRunner.EnvironmentVariables = SettingsManager.Settings.EnvironmentVariables;

await venvRunner.Setup().ConfigureAwait(false);

// Run initial install
if (HardwareHelper.HasNvidiaGpu())
{
await venvRunner.Setup().ConfigureAwait(false);
// CUDA
await venvRunner.CustomInstall("launch.py --use-cuda --debug --test", OnConsoleOutput)
.ConfigureAwait(false);
}

// Install torch / xformers based on gpu info
var gpus = HardwareHelper.IterGpuInfo().ToList();
if (gpus.Any(g => g.IsNvidia))
else if (PreferRocm())
{
Logger.Info("Starting torch install (CUDA)...");
await venvRunner.PipInstall(PyVenvRunner.TorchPipInstallArgsCuda, OnConsoleOutput)
// ROCm
await venvRunner.CustomInstall("launch.py --use-rocm --debug --test", OnConsoleOutput)
.ConfigureAwait(false);

Logger.Info("Installing xformers...");
await venvRunner.PipInstall("xformers", OnConsoleOutput).ConfigureAwait(false);
}
else if (gpus.Any(g => g.IsAmd))
else if (PreferDirectML())
{
Logger.Info("Starting torch install (DirectML)...");
await venvRunner.PipInstall(PyVenvRunner.TorchPipInstallArgsDirectML, OnConsoleOutput)
// DirectML
await venvRunner.CustomInstall("launch.py --use-directml --debug --test", OnConsoleOutput)
.ConfigureAwait(false);
}
else
{
Logger.Info("Starting torch install (CPU)...");
await venvRunner.PipInstall(PyVenvRunner.TorchPipInstallArgsCpu, OnConsoleOutput)
await venvRunner.CustomInstall("launch.py --debug --test", OnConsoleOutput)
.ConfigureAwait(false);
}

// Install requirements file
Logger.Info("Installing requirements.txt");
await venvRunner.PipInstall($"-r requirements.txt", OnConsoleOutput).ConfigureAwait(false);

progress?.Report(new ProgressReport(1, isIndeterminate: false));
}
Expand Down
34 changes: 34 additions & 0 deletions StabilityMatrix.Core/Python/PyVenvRunner.cs
Original file line number Diff line number Diff line change
Expand Up @@ -218,6 +218,40 @@ public async Task PipInstall(string args, Action<ProcessOutput>? outputDataRecei
);
}
}

/// <summary>
/// Run a custom install command. Waits for the process to exit.
/// workingDirectory defaults to RootPath.
/// </summary>
/// <summary>
/// Run a custom install command. Waits for the process to exit.
/// workingDirectory defaults to RootPath.
/// </summary>
/// <param name="args">Arguments passed to the launched install script.</param>
/// <param name="outputDataReceived">Optional callback forwarded every process output event.</param>
/// <exception cref="ProcessException">Thrown when the process exits with a non-zero code.</exception>
public async Task CustomInstall(string args, Action<ProcessOutput>? outputDataReceived = null)
{
    // Record output so it can be included in the error message on failure
    var output = new StringBuilder();

    // Always capture output, even when the caller did not provide a callback;
    // previously a null callback meant nothing was recorded and a failing
    // install threw with an empty output repr.
    var outputAction = new Action<ProcessOutput>(s =>
    {
        Logger.Debug($"Install output: {s.Text}");
        // Record to output
        output.Append(s.Text);
        // Forward to the caller's callback, if any
        outputDataReceived?.Invoke(s);
    });

    SetPyvenvCfg(PyRunner.PythonDir);
    RunDetached(args, outputAction);
    await Process.WaitForExitAsync().ConfigureAwait(false);

    // Check return code
    if (Process.ExitCode != 0)
    {
        throw new ProcessException(
            $"install script failed with code {Process.ExitCode}: {output.ToString().ToRepr()}"
        );
    }
}

/// <summary>
/// Run a command using the venv Python executable and return the result.
Expand Down

0 comments on commit bec5103

Please sign in to comment.