
Commit
Update README.md
bb-splunk committed Oct 1, 2024
1 parent a2d110f commit 1c43bce
Showing 12 changed files with 114 additions and 122 deletions.
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -1,11 +1,11 @@
 repos:
 - repo: https://github.com/phantomcyber/dev-cicd-tools
-  rev: v1.16
+  rev: v1.22
   hooks:
   - id: org-hook
   - id: package-app-dependencies
 - repo: https://github.com/Yelp/detect-secrets
-  rev: v1.4.0
+  rev: v1.5.0
   hooks:
   - id: detect-secrets
     args: ['--no-verify', '--exclude-files', '^microsoftsqlserver.json$']
2 changes: 1 addition & 1 deletion README.md
@@ -6,7 +6,7 @@ Connector Version: 2.3.2
 Product Vendor: Microsoft
 Product Name: Microsoft SQL Server
 Product Version Supported (regex): ".\*"
-Minimum Product Version: 6.1.1
+Minimum Product Version: 6.2.1
 
 This app supports investigative actions against a Microsoft SQL Server
8 changes: 4 additions & 4 deletions microsoftsqlserver.json
@@ -14,7 +14,7 @@
     "utctime_updated": "2024-03-14T13:57:37.000000Z",
     "package_name": "phantom_microsoftsqlserver",
     "main_module": "microsoftsqlserver_connector.py",
-    "min_phantom_version": "6.1.1",
+    "min_phantom_version": "6.2.1",
     "app_wizard_version": "1.0.0",
     "python_version": "3",
     "fips_compliant": true,
@@ -894,16 +894,16 @@
         "wheel": [
             {
                 "module": "pymssql",
-                "input_file": "wheels/py36/pymssql-2.2.11-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+                "input_file": "wheels/py36/pymssql-2.2.11-cp36-cp36m-manylinux_2_28_x86_64.whl"
             }
         ]
     },
     "pip39_dependencies": {
         "wheel": [
             {
                 "module": "pymssql",
-                "input_file": "wheels/py39/pymssql-2.2.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+                "input_file": "wheels/py39/pymssql-2.2.11-cp39-cp39-manylinux_2_28_x86_64.whl"
             }
         ]
     }
 }
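
Both wheels package the same pymssql 2.2.11 release, rebuilt against the newer manylinux_2_28 glibc baseline. For context, a minimal sketch of the DB-API pattern this dependency supports — the connection values and query below are invented for illustration, not taken from the connector:

```python
import pymssql  # bundled with the app as a platform-specific wheel

# Hypothetical connection details, for illustration only.
conn = pymssql.connect(
    server="sqlserver.example.com",
    user="soar_user",
    password="...",
    database="master",
)
cursor = conn.cursor(as_dict=True)  # rows come back as {column: value} dicts
cursor.execute("SELECT name, database_id FROM sys.databases")
rows = cursor.fetchall()
# cursor.description holds the DB-API 7-tuples (name, type_code, display_size,
# internal_size, precision, scale, null_ok) that the view code below renders.
conn.close()
```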
138 changes: 61 additions & 77 deletions microsoftsqlserver_connector.py

Large diffs are not rendered by default.

74 changes: 37 additions & 37 deletions microsoftsqlserver_view.py
@@ -19,30 +19,30 @@


 def display_query_results(provides, all_results, context):
-    context['results'] = results = []
+    context["results"] = results = []
     for summary, action_results in all_results:
         for result in action_results:

             ctx_result = {}
-            ctx_result['param'] = result.get_param()
+            ctx_result["param"] = result.get_param()

-            add_datasets_as_rows = ctx_result['param'].get('add_datasets_as_rows', False)
+            add_datasets_as_rows = ctx_result["param"].get("add_datasets_as_rows", False)
             # ctx_result['add_datasets_as_rows'] = add_datasets_as_rows
             # ctx_result['description_headers'] = ["name", "type_code", "display_size", "internal_size", "precision", "scale", "null_ok"]

-            data = reformat_data(result.get_data(),
-                    ["name", "type_code", "display_size", "internal_size", "precision", "scale", "null_ok"],
-                    add_datasets_as_rows)
+            data = reformat_data(
+                result.get_data(), ["name", "type_code", "display_size", "internal_size", "precision", "scale", "null_ok"], add_datasets_as_rows
+            )

-            if (data):
-                ctx_result['tables'] = data
+            if data:
+                ctx_result["tables"] = data

             # ctx_result['headers'] = data[0]['headers']
             # ctx_result['rows'] = data[0]['rows']

             summary = result.get_summary()
-            if (summary):
-                ctx_result['summary'] = summary
+            if summary:
+                ctx_result["summary"] = summary

             results.append(ctx_result)

@@ -59,30 +59,30 @@ def reformat_data(data, description_headers, add_datasets_as_rows):

             newdataset = {}
             ret += [newdataset]
-            newdataset['index'] = index
-            newdataset['headers'] = sorted(dataset['dataset'][0].keys())
-            newdataset['dataset'] = []
+            newdataset["index"] = index
+            newdataset["headers"] = sorted(dataset["dataset"][0].keys())
+            newdataset["dataset"] = []

-            for row in dataset['dataset']:
+            for row in dataset["dataset"]:

                 newrow = []
-                newdataset['dataset'] += [newrow]
+                newdataset["dataset"] += [newrow]

-                for col in newdataset['headers']:
+                for col in newdataset["headers"]:
                     newrow += [row[col]]

-            newdataset['description'] = []
-            for name in sorted(dataset['description'].keys()):
+            newdataset["description"] = []
+            for name in sorted(dataset["description"].keys()):

                 newrow = []
-                newdataset['description'] += [newrow]
+                newdataset["description"] += [newrow]
                 newrow += [name]

                 for i, col in enumerate(description_headers):
                     if i:
-                        newrow += [dataset['description'][name].get(col, "")]
+                        newrow += [dataset["description"][name].get(col, "")]

-            newdataset['dump'] = json.dumps(newdataset)
+            newdataset["dump"] = json.dumps(newdataset)

         else:

@@ -96,38 +96,38 @@ def reformat_data(data, description_headers, add_datasets_as_rows):

                     headers = newheaders
                     newdataset = {}
                     ret += [newdataset]
-                    newdataset['index'] = index
+                    newdataset["index"] = index
                     index += 1
-                    newdataset['headers'] = sorted(row.keys())
-                    newdataset['dataset'] = []
+                    newdataset["headers"] = sorted(row.keys())
+                    newdataset["dataset"] = []

                 newrow = []
-                newdataset['dataset'] += [newrow]
+                newdataset["dataset"] += [newrow]
                 for col in headers:
                     newrow += [row[col]]

-                newdataset['dump'] = json.dumps(newdataset)
+                newdataset["dump"] = json.dumps(newdataset)

     newret = []
     for i, dataset in enumerate(ret):

-        if 'description' in dataset:
+        if "description" in dataset:
             newdataset = {}
             newret += [newdataset]
-            newdataset['name'] = "Description for Dataset #" + str(i)
-            newdataset['headers'] = description_headers
-            newdataset['rows'] = dataset['description']
-            for r, row in enumerate(newdataset['rows']):
+            newdataset["name"] = "Description for Dataset #" + str(i)
+            newdataset["headers"] = description_headers
+            newdataset["rows"] = dataset["description"]
+            for r, row in enumerate(newdataset["rows"]):
                 for c, cell in enumerate(row):
-                    newdataset['rows'][r][c] = { "value": cell }
+                    newdataset["rows"][r][c] = {"value": cell}

         newdataset = {}
         newret += [newdataset]
-        newdataset['name'] = "Dataset #" + str(i)
-        newdataset['headers'] = dataset['headers']
-        newdataset['rows'] = dataset['dataset']
-        for r, row in enumerate(newdataset['rows']):
+        newdataset["name"] = "Dataset #" + str(i)
+        newdataset["headers"] = dataset["headers"]
+        newdataset["rows"] = dataset["dataset"]
+        for r, row in enumerate(newdataset["rows"]):
             for c, cell in enumerate(row):
-                newdataset['rows'][r][c] = { "value": cell }
+                newdataset["rows"][r][c] = {"value": cell}

     return newret
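
To make the transformation concrete, here is a small invented example of the shapes `reformat_data` works with, inferred from the branch that reads `dataset['dataset']` and `dataset['description']` — sample values only; the real input comes from `result.get_data()`:

```python
# Invented sample of the per-dataset structure: a list of row dicts plus a
# per-column description keyed by column name (DB-API 7-tuple fields).
data = [
    {
        "dataset": [
            {"name": "master", "database_id": 1},
            {"name": "tempdb", "database_id": 2},
        ],
        "description": {
            "name": {"type_code": 1, "null_ok": False},
            "database_id": {"type_code": 3, "null_ok": False},
        },
    }
]

# After reformatting, each dataset becomes a render-ready table: headers are
# the sorted column names and each row is a positional list, so the widget can
# iterate rows and cells; a final pass wraps every cell as {"value": cell}.
expected = {
    "index": 0,
    "headers": ["database_id", "name"],
    "dataset": [[1, "master"], [2, "tempdb"]],
}
```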
7 changes: 7 additions & 0 deletions pyproject.toml
@@ -0,0 +1,7 @@
+[tool.black]
+line-length = 145
+target-version = ['py39']
+verbose = true
+
+[tool.isort]
+line_length = 145
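
Much of the quote and layout churn in this commit matches black's output under this new configuration. A small sketch, assuming the black package is importable as a library, of the kind of rewrite it applies:

```python
import black

# A line in the repo's old single-quote style (sample code, not the connector).
src = "ctx_result['param'] = result.get_param()\n"

# Format with the same line length pyproject.toml now pins.
mode = black.Mode(line_length=145, target_versions={black.TargetVersion.PY39})
print(black.format_str(src, mode=mode), end="")
# -> ctx_result["param"] = result.get_param()
```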
1 change: 1 addition & 0 deletions release_notes/unreleased.md
@@ -1 +1,2 @@
 **Unreleased**
+* Updated dependencies [PAPP-34757]
2 changes: 1 addition & 1 deletion tox.ini
@@ -1,7 +1,7 @@
 [flake8]
 max-line-length = 145
 max-complexity = 28
-extend-ignore = F403,E128,E126,E111,E121,E127,E731,E201,E202,F405,E722,D,W292
+extend-ignore = F403,E128,E126,E121,E127,E731,E201,E202,E203,E701,F405,E722,D
 
 [isort]
 line_length = 145
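
The newly ignored E203 is the usual black-compatibility exception: black puts spaces around the colon in slices with complex bounds, which flake8's E203 ("whitespace before ':'") then flags. A tiny invented example of a black-formatted line that needs the ignore:

```python
rows = list(range(10))
start_index, end_index = 2, 8
# black keeps the spaces around the slice colon here, tripping E203:
chunk = rows[start_index + 1 : end_index]
```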
4 binary files changed but not shown (the renamed wheel files referenced in microsoftsqlserver.json above).
