diff --git a/inspect_submissions.py b/inspect_submissions.py
index 45a55ad..c517aa7 100644
--- a/inspect_submissions.py
+++ b/inspect_submissions.py
@@ -3,6 +3,7 @@
 import pandas as pd
 from datetime import datetime
 from utils.inspector import hash_submissions, suspicious_by_hash
+CSV_DIR = os.path.join(os.getcwd(), 'csv')
 
 def main():
     submissions_dir_name = ' '.join(sys.argv[1:]) if len(sys.argv) > 1 else exit(f'\nNo submissions dir name given. Provide the name as an argument.\n\nUsage: python {sys.argv[0]} [submissions dir name]\nExample: python {sys.argv[0]} AssignmentX\n')
@@ -10,15 +11,15 @@ def main():
     if not os.path.isdir(submissions_dir_path):
         exit(f'Directory {submissions_dir_path} does not exist.\nMake sure "{submissions_dir_name}" exists in "BB_submissions".')
     else:
-        hashes_csv_file_path = hash_submissions(submissions_dir_path)
-
+        hashes_csv_file_path = hash_submissions(submissions_dir_path) # generate hashes for all files and return output csv file to load & find duplicate/suspicious hashes
         csv = pd.read_csv(hashes_csv_file_path)
         df = pd.DataFrame(csv) # df with all files and their hashes
-        df_suspicious = suspicious_by_hash(df) # df with all files with duplicate hash, excludes files from the same student id
+        df_suspicious = suspicious_by_hash(df) # df with all files with duplicate/suspicious hash, excludes files from the same student id
+
         csv_name = f'{submissions_dir_name}_suspicious_{datetime.now().strftime("%Y%m%d-%H%M%S")}.csv'
-        csv_out = os.path.join('csv', csv_name)
+        csv_out = os.path.join(CSV_DIR, csv_name)
         df_suspicious.to_csv(csv_out, index=False)
 
 
 if __name__ == '__main__':
-    main()
\ No newline at end of file
+    main()
diff --git a/utils/inspector.py b/utils/inspector.py
index 6738bef..b1f5511 100644
--- a/utils/inspector.py
+++ b/utils/inspector.py
@@ -8,7 +8,7 @@ CSV_DIR = os.path.join(os.getcwd(), 'csv')
 
 def get_hashes_in_dir(dir_path: str) -> list:
     hash_list = []
-    for subdir, dirs, files in os.walk(dir_path): # Loop through all files in the directory and generate hashes
+    for subdir, dirs, files in os.walk(dir_path): # loop through all files in the directory and generate hashes
         for file in files:
             filepath = os.path.join(subdir, file)
             with open(filepath, 'rb') as f:
@@ -17,33 +17,34 @@
     return hash_list
 
 
-def hash_submissions(submissions_dir_path: str):
+def hash_submissions(submissions_dir_path: str) -> str:
     os.makedirs(CSV_DIR, exist_ok=True)
-    submissions_dir_name = os.path.abspath(submissions_dir_path).split(os.path.sep)[-1]
+    submissions_dir_name = os.path.abspath(submissions_dir_path).split(os.path.sep)[-1] # get name of submission/assignment by separating path and use rightmost part
     csv_file_name = f'{submissions_dir_name}_file_hashes_{datetime.now().strftime("%Y%m%d-%H%M%S")}.csv'
     csv_file_path = os.path.join(CSV_DIR, csv_file_name)
 
-    with open(csv_file_path, 'w', newline='') as csvfile: # Open the output CSV file for writing
+    with open(csv_file_path, 'w', newline='') as csvfile: # open the output CSV file for writing
         fieldnames = ['Student ID', 'filepath', 'filename', 'sha256 hash']
         writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
         writer.writeheader()
 
-        for student_dir_name in os.listdir(submissions_dir_path):
+        for student_dir_name in os.listdir(submissions_dir_path): # loop through each student dir to get hashes for all files per student
             student_dir_path = os.path.join(submissions_dir_path, student_dir_name)
-            hashes_dict = get_hashes_in_dir(student_dir_path)
+            hashes_dict = get_hashes_in_dir(student_dir_path) # dict with hashes for all student files
             for d in hashes_dict:
                 d.update({'Student ID': student_dir_name}) # update hash records with student id
             writer.writerows(hashes_dict)
 
     return csv_file_path
+
 
 
 def get_suspicious_hashes(df: pd.DataFrame) -> list:
-    drop_columns = ['filepath', 'filename']
+    drop_columns = ['filepath', 'filename'] # only need to keep 'student id' and 'sha256 hash' for groupby later
     df = df.drop(columns=drop_columns).sort_values('sha256 hash') # clear not needed colums & sort by hash
     duplicate_hash = df.loc[df.duplicated(subset=['sha256 hash'], keep=False), :] # all files with duplicate hash - incl. files from the same student id
-    hash_with_multiple_student_ids = duplicate_hash.groupby('sha256 hash').agg(lambda x: len(x.unique())>1) # true if more than 1 unique student ids (= multiple student ids with same hash), false if unique (= same student id re-submitting with the same hash)
+    hash_with_multiple_student_ids = duplicate_hash.groupby('sha256 hash').agg(lambda x: len(x.unique())>1) # true if more than 1 unique student ids (= files with the same hash by multiple student ids), false if unique student id (= files from the same student id with the same hash)
 
-    suspicious_hashes_list = hash_with_multiple_student_ids[hash_with_multiple_student_ids['Student ID']==True].index.to_list() # list with duplicate hashes - only if different student id (doesn't include attempts from same student id)
+    suspicious_hashes_list = hash_with_multiple_student_ids[hash_with_multiple_student_ids['Student ID']==True].index.to_list() # list with duplicate hashes - only if different student id (doesn't include files from same student id)
 
     return suspicious_hashes_list
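The per-file hashing that fills hash_list sits just outside the hunk context above. As a minimal standalone sketch of that technique only (assuming SHA-256 over the full file bytes, which is what the 'sha256 hash' column implies; the helper names below are illustrative and not taken from the repo):

import hashlib
import os


def sha256_of_file(filepath: str) -> str:
    # hash the raw file bytes; the hex digest is what would land in the 'sha256 hash' column
    with open(filepath, 'rb') as f:
        return hashlib.sha256(f.read()).hexdigest()


def walk_and_hash(dir_path: str) -> list:
    # same os.walk pattern as get_hashes_in_dir: one record per file found under dir_path
    records = []
    for subdir, dirs, files in os.walk(dir_path):
        for file in files:
            filepath = os.path.join(subdir, file)
            records.append({'filepath': filepath, 'filename': file, 'sha256 hash': sha256_of_file(filepath)})
    return records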
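To make the reworded comments in get_suspicious_hashes concrete, here is a small self-contained demo of the same pandas steps on invented data (the student IDs and hash strings are made up for illustration):

import pandas as pd

# toy version of the hashes CSV: 'aaa' is shared by two different students,
# 'bbb' appears only once, and s001 also re-submitted its own 'aaa' file
df = pd.DataFrame({
    'Student ID': ['s001', 's001', 's002', 's003'],
    'filepath': ['a/x.py', 'a/x_v2.py', 'b/x.py', 'c/y.py'],
    'filename': ['x.py', 'x_v2.py', 'x.py', 'y.py'],
    'sha256 hash': ['aaa', 'aaa', 'aaa', 'bbb'],
})

df = df.drop(columns=['filepath', 'filename']).sort_values('sha256 hash')
duplicate_hash = df.loc[df.duplicated(subset=['sha256 hash'], keep=False), :]
multi_student = duplicate_hash.groupby('sha256 hash').agg(lambda x: len(x.unique()) > 1)
suspicious = multi_student[multi_student['Student ID'] == True].index.to_list()
print(suspicious)  # ['aaa'], flagged only because two different student ids share it

A hash duplicated only within a single student's own attempts never reaches the final list, which is exactly the behaviour the updated comments describe.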