{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [] }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" } }, "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "id": "dQAr2gM1_wFl" }, "outputs": [], "source": [ "import pandas as pd" ] }, { "cell_type": "code", "source": [ "\n", "df = pd.read_parquet('evaluation_results-00000-of-00001.parquet', engine='pyarrow')\n", "df1 = pd.read_parquet('boostrap_confidence_intervals-00000-of-00001.parquet', engine='pyarrow')" ], "metadata": { "id": "Zlng_K58AFsV" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "def filtrar_respostas(df, modelo):\n", " filtro = df[df['id'].str.startswith(modelo)]\n", " dic = {'C1': \"\", 'C2': \"\", 'C3': \"\", 'C4': \"\", 'C5': \"\"}\n", " for idx, row in filtro.iterrows():\n", " for key in dic:\n", " if key in row['id']:\n", " print_string = f\"{row['Micro_F1']:.2f} & {row['Weighted_F1']:.2f} & {row['QWK']:.2f} &\"\n", " dic[key] += print_string\n", " return dic\n", "\n", "# def printar_final(dic, modelo):\n", "# string = f\"{modelo} \"\n", "# for key in dic:\n", "# string += f\"& {dic[key]} \"\n", "# print(f\"{string[:-2]} \\\\\\\\\")\n", "\n", "def filtrar_dfs(perf, boot, modelo, final=\"\"):\n", " global SELECT\n", " filtro_perf = perf[perf['id'].str.startswith(modelo) & perf['id'].str.endswith(final)]\n", " filtro_boot = boot[boot['experiment_id'].str.startswith(modelo) & boot['experiment_id'].str.endswith(final)]\n", " dic = {'C1': {}, 'C2': {}, 'C3': {}, 'C4': {}, 'C5': {}}\n", " string = \"\"\n", " for idx, row in filtro_perf.iterrows():\n", " for key in dic:\n", " if key in row['id']:\n", " dic[key]['Micro_F1'] = row['Micro_F1']\n", " dic[key]['Weighted_F1'] = row['Weighted_F1']\n", " dic[key]['QWK'] = row['QWK']\n", " for idx, row in filtro_boot.iterrows():\n", " for key in dic:\n", " if key in row['experiment_id']:\n", " dic[key]['QWK_mean'] = row['QWK_mean']\n", " dic[key][SELECT[1]] = row[SELECT[1]]\n", " dic[key][SELECT[0]] = row[SELECT[0]]\n", " #print(dic)\n", " string = \"\"\n", " for key in dic:\n", " string += f\"{dic[key][SELECT[0]]:.2f} & {dic[key][SELECT[1]]:.2f} &\"\n", " print(f\"{modelo}-{final} & {string[:-2]} \\\\\\\\\")\n", "\n", "modelos = [\n", " #Encoders\n", " (\"jbcs2025_mbert_base\", \"\"), (\"jbcs2025_bertimbau_base-\", \"\"), (\"jbcs2025_bertimbau-large\", \"\"),\n", " #Decoders\n", " #(\"jbcs2025_llama31_8b-balanced\", \"essay_only\"), (\"jbcs2025_phi35-balanced\", \"essay_only\"), (\"jbcs2025_phi4-balanced\", \"essay_only\"),\n", " # (\"jbcs2025_Llama-3.1-8B-llama31\", \"full_context\"), (\"jbcs2025_Phi-3.5-mini-instruct\", \"full_context\"), (\"jbcs2025_phi-4-phi4\", \"full_context\"),\n", " #Sabias\n", " (\"sabia-3-zero-shot\", \"essay_only\"), (\"sabia-3-zero-shot\", \"full_context\"),\n", " #(\"sabia-3-grader-zero-shot\", \"essay_only\"),\n", " (\"sabia-3-grader-zero-shot\", \"full_context\"),\n", " #Gpts\n", " (\"gpt-4o-2024-11-20-zero-shot\", \"essay_only\"), (\"gpt-4o-2024-11-20-zero-shot\", \"full_context\"),\n", " (\"gpt-4o-2024-11-20-grader-zero-shot\", \"essay_only\"), (\"gpt-4o-2024-11-20-grader-zero-shot\", \"full_context\"),\n", " #Deepseeks\n", " (\"deepseek-reasoner-zero-shot-\", \"essay_only\"), (\"deepseek-reasoner-zero-shot-\", \"full_context\"),\n", " (\"Deepseek-reasoner-grader-zero-shot-\", \"essay_only\") ]\n", "SELECT = [\"QWK_lower_95ci\", \"QWK_upper_95ci\"]\n", "for (modelo, final) in modelos:\n", " filtrar_dfs(df, df1, 
modelo, final=final)" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "2_4fdVNNARGi", "outputId": "6e6b6032-97e7-495d-993c-9f5c8a8f49d3" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "jbcs2025_mbert_base- & 0.33 & 0.56 &0.00 & 0.29 &0.10 & 0.42 &0.12 & 0.42 &0.46 & 0.67 \\\\\n", "jbcs2025_bertimbau_base-- & 0.58 & 0.76 &0.28 & 0.55 &0.21 & 0.48 &0.51 & 0.73 &0.34 & 0.59 \\\\\n", "jbcs2025_bertimbau-large- & 0.61 & 0.79 &0.27 & 0.57 &0.13 & 0.40 &0.46 & 0.67 &0.35 & 0.60 \\\\\n", "sabia-3-zero-shot-essay_only & 0.61 & 0.75 &-0.03 & 0.06 &0.16 & 0.43 &0.39 & 0.62 &0.37 & 0.63 \\\\\n", "sabia-3-zero-shot-full_context & 0.47 & 0.66 &0.31 & 0.60 &0.32 & 0.56 &0.23 & 0.52 &0.40 & 0.65 \\\\\n", "sabia-3-grader-zero-shot-full_context & 0.40 & 0.58 &0.35 & 0.62 &0.42 & 0.65 &0.39 & 0.64 &0.34 & 0.63 \\\\\n", "gpt-4o-2024-11-20-zero-shot-essay_only & 0.43 & 0.58 &0.05 & 0.34 &0.24 & 0.51 &0.40 & 0.60 &0.41 & 0.67 \\\\\n", "gpt-4o-2024-11-20-zero-shot-full_context & 0.39 & 0.56 &0.38 & 0.63 &0.42 & 0.63 &0.38 & 0.58 &0.20 & 0.49 \\\\\n", "gpt-4o-2024-11-20-grader-zero-shot-essay_only & 0.40 & 0.56 &0.15 & 0.42 &0.15 & 0.41 &0.40 & 0.60 &0.41 & 0.66 \\\\\n", "gpt-4o-2024-11-20-grader-zero-shot-full_context & 0.47 & 0.63 &0.43 & 0.68 &0.32 & 0.56 &0.39 & 0.60 &0.33 & 0.61 \\\\\n", "deepseek-reasoner-zero-shot--essay_only & 0.27 & 0.43 &-0.14 & 0.11 &0.23 & 0.53 &0.41 & 0.61 &0.41 & 0.67 \\\\\n", "deepseek-reasoner-zero-shot--full_context & 0.27 & 0.43 &0.30 & 0.53 &0.65 & 0.80 &0.39 & 0.58 &0.45 & 0.69 \\\\\n", "Deepseek-reasoner-grader-zero-shot--essay_only & 0.36 & 0.51 &-0.01 & 0.08 &0.23 & 0.54 &0.37 & 0.56 &0.40 & 0.68 \\\\\n" ] } ] }, { "cell_type": "code", "source": [ "SELECT = [\"Macro_F1_lower_95ci\", \"Macro_F1_upper_95ci\"]\n", "for (modelo, final) in modelos:\n", " filtrar_dfs(df, df1, modelo, final=final)" ], "metadata": { "id": "dhyTkpR2ARwp", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "68fd310f-a620-4e32-ea7a-b1855c592f3b" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "jbcs2025_mbert_base- & 0.26 & 0.46 &0.16 & 0.33 &0.11 & 0.23 &0.13 & 0.29 &0.25 & 0.40 \\\\\n", "jbcs2025_bertimbau_base-- & 0.36 & 0.62 &0.22 & 0.40 &0.20 & 0.37 &0.29 & 0.59 &0.15 & 0.27 \\\\\n", "jbcs2025_bertimbau-large- & 0.39 & 0.66 &0.21 & 0.37 &0.14 & 0.29 &0.23 & 0.43 &0.23 & 0.41 \\\\\n", "jbcs2025_llama31_8b-balanced-essay_only & 0.37 & 0.64 &0.16 & 0.29 &0.21 & 0.38 &0.27 & 0.52 &0.16 & 0.29 \\\\\n", "jbcs2025_phi35-balanced-essay_only & 0.42 & 0.70 &0.16 & 0.37 &0.20 & 0.38 &0.24 & 0.48 &0.22 & 0.34 \\\\\n", "jbcs2025_phi4-balanced-essay_only & 0.39 & 0.65 &0.23 & 0.39 &0.15 & 0.31 &0.23 & 0.40 &0.14 & 0.25 \\\\\n", "jbcs2025_Llama-3.1-8B-llama31-full_context & 0.35 & 0.56 &0.17 & 0.32 &0.19 & 0.34 &0.23 & 0.46 &0.14 & 0.35 \\\\\n", "jbcs2025_Phi-3.5-mini-instruct-full_context & 0.09 & 0.18 &0.09 & 0.18 &0.33 & 0.56 &0.21 & 0.43 &0.19 & 0.33 \\\\\n", "jbcs2025_phi-4-phi4-full_context & 0.30 & 0.53 &0.18 & 0.31 &0.22 & 0.35 &0.24 & 0.40 &0.25 & 0.45 \\\\\n" ] } ] }, { "cell_type": "code", "source": [], "metadata": { "id": "zyaRKx5oChUn" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "from torch import nn\n", "import torch" ], "metadata": { "id": "cB-6u1yyChfR" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "m = nn.Sigmoid()\n", "input = torch.randn(2)\n", "print(input)\n", "output = 
m(torch.tensor(-100))  # sigmoid saturates for large negative inputs; this rounds to 0. in float32\n", "output" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "IJ4ySLhpClfe", "outputId": "b48b901d-fdec-4515-830c-8a1069d2b807" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "tensor([ 1.5602, -1.1152])\n" ] }, { "output_type": "execute_result", "data": { "text/plain": [ "tensor(0.)" ] }, "metadata": {}, "execution_count": 6 } ] }, { "cell_type": "code", "source": [], "metadata": { "id": "kvuSSIcCCuOa" }, "execution_count": null, "outputs": [] },
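{ "cell_type": "code", "source": [ "# Illustrative sketch (added cell, not part of the original run): because sigmoid\n", "# saturates for large-magnitude inputs, log(sigmoid(x)) can underflow to -inf;\n", "# F.logsigmoid (or nn.BCEWithLogitsLoss) computes the same quantity stably.\n", "import torch\n", "import torch.nn.functional as F\n", "\n", "x = torch.tensor([-100.0, 0.0, 100.0])\n", "print(torch.sigmoid(x))             # ends saturate to ~0 and ~1 in float32\n", "print(torch.log(torch.sigmoid(x)))  # -inf at the saturated low end\n", "print(F.logsigmoid(x))              # stable: roughly [-100.0, -0.69, 0.0]" ], "metadata": {}, "execution_count": null, "outputs": [] } ] }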