!pip install transformers tensorflow

import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, AutoTokenizer

# Load the pre-trained model and tokenizer
model_name = "t5-base"  # Using T5 model; the "summarize:" prefix in summarize() selects the task
# NOTE: from_pretrained downloads weights/vocab from the Hugging Face hub on
# first use, then reads from the local cache.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)

# Function to summarize text
def summarize(text, max_length=130, min_length=30, length_penalty=2.0, num_beams=4):
    """Produce an abstractive summary of ``text`` with the module-level T5 model.

    The keyword arguments are forwarded to ``model.generate``:
    ``max_length``/``min_length`` bound the summary length in tokens,
    ``length_penalty`` biases beam scoring toward shorter or longer output,
    and ``num_beams`` sets the beam-search width. Returns the summary string.
    """
    # T5 is a text-to-text model: the task is selected by a plain-text prefix.
    prompt = "summarize: " + text
    token_ids = tokenizer.encode(prompt, return_tensors="tf", max_length=512, truncation=True)

    # Beam search over the encoded prompt; yields a batch of id sequences.
    generated = model.generate(
        token_ids,
        max_length=max_length,
        min_length=min_length,
        length_penalty=length_penalty,
        num_beams=num_beams,
        early_stopping=True,
    )

    # One input sequence in, so take the first output and drop </s>, <pad>, etc.
    return tokenizer.decode(generated[0], skip_special_tokens=True)

# Example text to summarize
# Sample passage to run through the summarizer.
text = """
    The quick brown fox jumps over the lazy dog. This is a classic example sentence used in typography and design to 
    demonstrate fonts and layouts. It contains all the letters of the English alphabet, making it a pangram. This sentence 
    has been used for decades in various contexts and continues to be a popular example for showcasing text features.
"""

# Summarize once, then show input and output under their headings.
summary = summarize(text)
for heading, body in (("Original Text: \n", text), ("\nSummary: \n", summary)):
    print(heading, body)


######################

# Second, independent section: extractive question answering.
# The imports and tokenizer/model names below intentionally shadow the
# summarization section's — each section is meant to run as its own cell.
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering

# Load the pre-trained model and tokenizer (downloaded from the Hugging Face
# hub on first use, then cached locally).
model_name = "distilbert-base-uncased-distilled-squad"  # Using DistilBERT model fine-tuned on SQuAD
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)

# Function to perform question answering
def answer_question(question, context):
    """Extract an answer to ``question`` from ``context`` with the module-level QA model.

    The model scores every token position as a possible answer start and end;
    the answer is the decoded token span between the two argmax positions.
    Returns the answer string, or "" when the model predicts an invalid
    (end-before-start) span.
    """
    # Encode as a single "[CLS] question [SEP] context [SEP]" pair.
    inputs = tokenizer.encode_plus(question, context, add_special_tokens=True, return_tensors="tf")
    input_ids = inputs["input_ids"].numpy()[0]

    # Get per-token start/end logits for the answer span.
    outputs = model(inputs)
    answer_start = int(tf.argmax(outputs.start_logits, axis=1).numpy()[0])
    answer_end = int(tf.argmax(outputs.end_logits, axis=1).numpy()[0]) + 1

    # Fix: start and end are argmaxed independently, so the end can land
    # before the start; previously that silently produced an empty slice.
    if answer_end <= answer_start:
        return ""

    # Fix: decode() joins WordPiece tokens the same way convert_tokens_to_string
    # did, but skip_special_tokens also drops [CLS]/[SEP]/[PAD] that the old
    # path could leak into the answer when the model pointed at them.
    return tokenizer.decode(input_ids[answer_start:answer_end], skip_special_tokens=True)

# Example context and question
# Sample passage plus a question answerable from it.
context = """
    The quick brown fox jumps over the lazy dog. This is a classic example sentence used in typography and design to 
    demonstrate fonts and layouts. It contains all the letters of the English alphabet, making it a pangram. This sentence 
    has been used for decades in various contexts and continues to be a popular example for showcasing text features.
"""
question = "What is the quick brown fox doing?"

# Run the model once, then print question and answer under their headings.
answer = answer_question(question, context)
for heading, body in (("Question: \n", question), ("\nAnswer: \n", answer)):
    print(heading, body)