From 54efc480873cd23e21248a43d779daef73b540c8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=EA=B9=80=EC=84=9D=ED=99=98?= <140064690+ihatetmat@users.noreply.github.com>
Date: Tue, 28 May 2024 21:45:03 +0900
Subject: [PATCH] Update main.py

---
 source/main.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/source/main.py b/source/main.py
index ab2a65f..ad09c95 100644
--- a/source/main.py
+++ b/source/main.py
@@ -24,7 +24,6 @@ def pad_sequences(input_ids, maxlen) :
 # Convert the input data
 def convert_input_data(sentences):
     sentences = ["[CLS] " + str(sentence) + " [SEP]" for sentence in sentences]
-    print(sentences)
     # Split the sentences into tokens with BERT's tokenizer
     tokenized_texts = [tokenizer.tokenize(sent) for sent in sentences]
     # Maximum sequence length of the input tokens
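
For context, the deleted line is a leftover debug print inside convert_input_data. The sketch below shows roughly how such a BERT input-conversion pipeline fits together, assuming the Hugging Face transformers BertTokenizer. The model name "bert-base-multilingual-cased", the MAX_LEN value, and the attention-mask step are illustrative assumptions and are not taken from the patch itself.

    # Minimal sketch under the assumptions stated above.
    from transformers import BertTokenizer

    # Assumption: a multilingual BERT tokenizer, since the project handles Korean text.
    tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased")
    MAX_LEN = 128  # assumed maximum sequence length

    def pad_sequences(input_ids, maxlen):
        # Right-pad (or truncate) each token-id sequence to a fixed length with zeros.
        return [ids[:maxlen] + [0] * max(0, maxlen - len(ids)) for ids in input_ids]

    def convert_input_data(sentences):
        # Wrap each sentence with BERT's special tokens, as in the patched code.
        sentences = ["[CLS] " + str(sentence) + " [SEP]" for sentence in sentences]
        # Split the sentences into tokens with BERT's tokenizer.
        tokenized_texts = [tokenizer.tokenize(sent) for sent in sentences]
        # Map tokens to vocabulary ids and pad to the maximum sequence length.
        input_ids = [tokenizer.convert_tokens_to_ids(tokens) for tokens in tokenized_texts]
        input_ids = pad_sequences(input_ids, MAX_LEN)
        # Attention mask: 1.0 for real tokens, 0.0 for padding.
        attention_masks = [[float(i > 0) for i in ids] for ids in input_ids]
        return input_ids, attention_masks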