Remove evaluate dependency.
knc6 committed Oct 13, 2023
1 parent a2cb2c3 commit cc2cb43
Showing 2 changed files with 25 additions and 18 deletions.
32 changes: 19 additions & 13 deletions jarvis_leaderboard/rebuild.py
@@ -134,7 +134,6 @@ def get_metric_value(
csv_path="../contributions/alignn_model/AI-SinglePropertyPrediction-formation_energy_peratom-dft_3d-test-mae.csv.zip",
plot_filename=None,
):

fname = csv_path.split("/")[-1].split(".csv.zip")[0]
contribution = csv_path.split("/")[-2]
temp = fname.split("-")
@@ -322,19 +321,30 @@ def get_metric_value(
)
results["random_guessing_performance"] = random_guessing_performance
if metric == "rouge":
import evaluate

# Due to dependency conflicts now avoiding evaluate package
# import evaluate
# from datasets import load_metric
# metric = load_metric("rouge")
# TODO: merge with benchmark instead of using target from csv.zip
rouge_score = evaluate.load("rouge")
scores = rouge_score.compute(
predictions=df["prediction"], references=df["actual"]
)
# rouge_score = evaluate.load("rouge")
# scores = rouge_score.compute(
# predictions=df["prediction"], references=df["actual"]
# )
# scores = rouge_score.compute(predictions=csv_data['prediction'],references=csv_data['target'])
rouge = scores["rouge1"]
# rouge = scores["rouge1"]
# rouge=(calc_rouge_scores(df['target'],df['prediction']))['rouge1']
results["res"] = round(rouge, 4)
# results["res"] = round(rouge, 4)
# print('rouge eval',rouge)
from rouge import Rouge

rouge = Rouge()
hypothesis = df["prediction"]
reference = df["actual"]
scores = rouge.get_scores(hypothesis, reference, avg=True)["rouge-1"][
"r"
]
results["res"] = round(scores, 4)
print("rouge scores", scores)
if metric == "rmse" and subcat == "AtomGen":
print("AtomGen")
from pymatgen.analysis.structure_matcher import StructureMatcher
@@ -372,7 +382,6 @@ def get_metric_value(


def check_metadata_json_exists():

search = root_dir + "/contributions"
all_dirs = []
all_dirs_meta = []
@@ -448,7 +457,6 @@ def old_check_metadata_info_exists():


def check_run_sh_exists():

search = root_dir + "/contributions"
all_dirs = []
all_dirs_meta = []
@@ -477,7 +485,6 @@ def check_run_sh_exists():


def check_at_least_one_csv_zip_exists():

search = root_dir + "/contributions"
all_dirs = []
all_dirs_meta = []
@@ -497,7 +504,6 @@ def check_at_least_one_csv_zip_exists():


def check_json_zip_exists_for_csv_zip():

search = root_dir + "/contributions"
# print('search',search)
problem_csv = []
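For reference, a minimal sketch of the new ROUGE path introduced above, assuming the pure-Python rouge package that setup.py now requires; the toy sentences below are illustrative only:

from rouge import Rouge

# Toy prediction/reference pair; rebuild.py reads these from the
# contribution's csv.zip columns "prediction" and "actual".
predictions = ["the formation energy per atom is negative"]
references = ["formation energy per atom is negative for this material"]

rouge = Rouge()
# avg=True averages the scores over all pairs; each ROUGE variant
# reports recall ("r"), precision ("p") and F-score ("f").
scores = rouge.get_scores(predictions, references, avg=True)
print(round(scores["rouge-1"]["r"], 4))

Note that the commented-out evaluate/rouge-score path reports an F-measure for rouge1 by default, whereas the new code takes ROUGE-1 recall ("r"), so absolute leaderboard values may shift slightly.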
11 changes: 6 additions & 5 deletions setup.py
@@ -15,17 +15,18 @@
"jarvis-tools>=2021.07.19",
"scikit-learn>=0.24.1",
"pandas>=1.2.4",
"rouge>=1.0.1",
"mkdocs==1.5.2",
"datasets==2.14.5",
"mkdocs-material==9.0.5",
"pydantic==2.3.0",
"markdown==3.2.1",
"absl-py==1.4.0",
"evaluate==0.4.0",
"nltk==3.8.1",
"rouge-score==0.1.2",
"fsspec==2023.9.0",
"aiohttp==3.8.5",
# "evaluate==0.4.0",
# "rouge-score==0.1.2",
# "fsspec==2023.9.0",
# "aiohttp==3.8.5",
# "datasets==2.14.5",
# "alignn>=2022.10.23",
# "flake8>=3.9.1",
# "pycodestyle>=2.7.0",
Expand Down

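A quick, hypothetical smoke test (not part of the commit), assuming the environment where jarvis_leaderboard is installed, to confirm that the newly pinned rouge requirement, rather than the removed rouge-score package, provides the Rouge class used in rebuild.py:

# Hypothetical check; the sample sentence is illustrative only.
try:
    from rouge import Rouge  # supplied by the "rouge>=1.0.1" requirement
except ImportError as exc:
    raise SystemExit("Missing dependency; install the 'rouge' package.") from exc

print(Rouge().get_scores("a test sentence", "a test sentence", avg=True)["rouge-1"])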