diff --git a/public/pdfs/roadmaps/prompt-engineering.pdf b/public/pdfs/roadmaps/prompt-engineering.pdf index 0390392f7..fb4f1e7b0 100644 Binary files a/public/pdfs/roadmaps/prompt-engineering.pdf and b/public/pdfs/roadmaps/prompt-engineering.pdf differ diff --git a/public/roadmaps/prompt-engineering.png b/public/roadmaps/prompt-engineering.png new file mode 100644 index 000000000..cac124931 Binary files /dev/null and b/public/roadmaps/prompt-engineering.png differ diff --git a/scripts/editor-roadmap-content-json.ts b/scripts/editor-roadmap-content-json.ts index 8429ed9f8..ccc16c0c0 100644 --- a/scripts/editor-roadmap-content-json.ts +++ b/scripts/editor-roadmap-content-json.ts @@ -21,7 +21,7 @@ export const allowedLinkTypes = [ 'podcast', ] as const; -async function fetchRoadmapJson(roadmapId: string) { +export async function fetchRoadmapJson(roadmapId: string) { const response = await fetch( `https://roadmap.sh/api/v1-official-roadmap/${roadmapId}`, ); diff --git a/scripts/editor-roadmap-dirs.ts b/scripts/editor-roadmap-dirs.ts index 2f02320e0..26857b171 100644 --- a/scripts/editor-roadmap-dirs.ts +++ b/scripts/editor-roadmap-dirs.ts @@ -1,11 +1,10 @@ +import type { Node } from '@roadmapsh/editor'; +import matter from 'gray-matter'; import fs from 'node:fs/promises'; import path from 'node:path'; import { fileURLToPath } from 'node:url'; -import type { Node } from '@roadmapsh/editor'; -import matter from 'gray-matter'; import type { RoadmapFrontmatter } from '../src/lib/roadmap'; import { slugify } from '../src/lib/slugger'; -import { httpGet } from '../src/lib/http'; // ERROR: `__dirname` is not defined in ES module scope // https://iamwebwiz.medium.com/how-to-fix-dirname-is-not-defined-in-es-module-scope-34d94a86694d @@ -49,12 +48,27 @@ if (roadmapFrontmatter.renderer !== 'editor') { process.exit(1); } -const { response: roadmapContent, error } = await httpGet( - `${import.meta.env.PUBLIC_API_URL}/v1-official-roadmap/${roadmapId}`, -); +export async function 
fetchRoadmapJson(roadmapId: string) { + const response = await fetch( + `https://roadmap.sh/api/v1-official-roadmap/${roadmapId}`, + ); -if (error) { - console.error(error); + if (!response.ok) { + throw new Error(`Failed to fetch roadmap json: ${response.statusText}`); + } + + const data = await response.json(); + if (data.error) { + throw new Error(`Failed to fetch roadmap json: ${data.error}`); + } + + return data; +} + +const roadmapContent = await fetchRoadmapJson(roadmapId); + +if (!roadmapContent) { + console.error(`Failed to fetch roadmap json: ${roadmapId}`); process.exit(1); } diff --git a/src/data/roadmaps/prompt-engineering/content/agents@Pw5LWA9vNRY0N2M0FW16f.md b/src/data/roadmaps/prompt-engineering/content/agents@Pw5LWA9vNRY0N2M0FW16f.md new file mode 100644 index 000000000..e2f83299f --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/agents@Pw5LWA9vNRY0N2M0FW16f.md @@ -0,0 +1 @@ +# Agents \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/ai-red-teaming@Wvu9Q_kNhH1_JlOgxAjP6.md b/src/data/roadmaps/prompt-engineering/content/ai-red-teaming@Wvu9Q_kNhH1_JlOgxAjP6.md new file mode 100644 index 000000000..5fda0cdfe --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/ai-red-teaming@Wvu9Q_kNhH1_JlOgxAjP6.md @@ -0,0 +1 @@ +# AI Red Teaming \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/ai-vs-agi@Sj1CMZzZp8kF-LuHcd_UU.md b/src/data/roadmaps/prompt-engineering/content/ai-vs-agi@Sj1CMZzZp8kF-LuHcd_UU.md new file mode 100644 index 000000000..7f97e9943 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/ai-vs-agi@Sj1CMZzZp8kF-LuHcd_UU.md @@ -0,0 +1 @@ +# AI vs AGI \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/anthropic@V8pDOwrRKKcHBTd4qlSsH.md b/src/data/roadmaps/prompt-engineering/content/anthropic@V8pDOwrRKKcHBTd4qlSsH.md new file mode 100644 index 000000000..895ae718c --- /dev/null +++ 
b/src/data/roadmaps/prompt-engineering/content/anthropic@V8pDOwrRKKcHBTd4qlSsH.md @@ -0,0 +1 @@ +# Anthropic \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/automatic-prompt-engineering@diHNCiuKHeMVgvJ4OMwVh.md b/src/data/roadmaps/prompt-engineering/content/automatic-prompt-engineering@diHNCiuKHeMVgvJ4OMwVh.md new file mode 100644 index 000000000..e675da613 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/automatic-prompt-engineering@diHNCiuKHeMVgvJ4OMwVh.md @@ -0,0 +1 @@ +# Automatic Prompt Engineering \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/basic-llm/how-llms-built.md b/src/data/roadmaps/prompt-engineering/content/basic-llm/how-llms-built.md deleted file mode 100644 index f2112f18a..000000000 --- a/src/data/roadmaps/prompt-engineering/content/basic-llm/how-llms-built.md +++ /dev/null @@ -1,17 +0,0 @@ -# How are LLMs Built? - -On a high level, training an LLM model involves three steps i.e. data collection, training and evaluation. - -- **Data Collection** The first step is to collect the data that will be used to train the model. The data can be collected from various sources such as Wikipedia, news articles, books, websites etc. - -- **Training**: The data then goes through a training pipeline where it is cleaned and preprocessed before being fed into the model for training. The training process usually takes a long time and requires a lot of computational power. - -- **Evaluation**: The final step is to evaluate the performance of the model to see how well it performs on various tasks such as question answering, summarization, translation etc. - -The output from the training Pipeline is an LLM model which is simply the parameters or weights which capture the knowledge learned during the training process. 
These parameters or weights are typically serialized and stored in a file, which can then be loaded into any application that requires language processing capabilities e.g. text generation, question answering, language processing etc. - -Learn more from the following resources: - -- [@article@What is LLM & How to Build Your Own Large Language Models?](https://www.signitysolutions.com/blog/how-to-build-large-language-models) -- [@guides@Large language model](https://en.wikipedia.org/wiki/Large_language_model) -- [@video@Five Steps to Create a New AI Model](https://youtu.be/jcgaNrC4ElU) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/basic-llm/index.md b/src/data/roadmaps/prompt-engineering/content/basic-llm/index.md deleted file mode 100644 index b9396ddbd..000000000 --- a/src/data/roadmaps/prompt-engineering/content/basic-llm/index.md +++ /dev/null @@ -1,8 +0,0 @@ -# Basic LLM Concepts - -LLM stands for "Large Language Model." These are advanced AI systems designed to understand and generate human-like text based on the input they receive. These models have been trained on vast amounts of text data and can perform a wide range of language-related tasks, such as answering questions, carrying out conversations, summarizing text, translating languages, and much more. - -Visit the following resources to learn more about LLMs. - -- [@roadmap.sh@Introduction to LLMs](https://roadmap.sh/guides/introduction-to-llms) -- [@feed@Explore top posts about LLM](https://app.daily.dev/tags/llm?ref=roadmapsh) diff --git a/src/data/roadmaps/prompt-engineering/content/basic-llm/llm-types.md b/src/data/roadmaps/prompt-engineering/content/basic-llm/llm-types.md deleted file mode 100644 index d9ab5efb9..000000000 --- a/src/data/roadmaps/prompt-engineering/content/basic-llm/llm-types.md +++ /dev/null @@ -1,25 +0,0 @@ -# Types of LLMs - -On a high level, LLMs can be categorized into two types i.e. Base LLMs and Instruction tuned LLMs. 
- -## Base LLMs - -Base LLMs are the LLMs which are designed to predict the next word based on the training data. They are not designed to answer questions, carry out conversations or help solve problems. For example, if you give a base LLM the sentence "In this book about LLMs, we will discuss", it might complete this sentence and give you "In this book about LLMs, we will discsus **what LLMs are, how they work, and how you can leverage them in your applications.**" Or if you give it "What are some famous social networks?", instead of answering it might give back "Why do people use social networks?" or "What are some of the benefits of social networks?". As you can see, it is giving us relevant text but it is not answering the question. This is where the Instruction Tuned LLMs come into the picture. - -## Instruction tuned LLMs - -Instruction Tuned LLMs, instead of trying to autocomplete your text, try to follow the given instructions using the data that they have been trained on. For example, if you input the sentence "What are LLMs?" it will use the data that it is trained on and try to answer the question. Similarly, if you input "What are some famous social networks?" it will try to answer the question instead of giving you a random answer. - -Instruction Tuned LLMs are built on top of Base LLMs: - -``` -Instruction Tuned LLMs = Base LLMs + Further Tuning + RLHF -``` - -To build an Instruction Tuned LLM, a Base LLM is taken and is further trained using a large dataset covering sample "Instructions" and how the model should perform as a result of those instructions. The model is then fine-tuned using a technique called "Reinforcement Learning with Human Feedback" (RLHF) which allows the model to learn from human feedback and improve its performance over time. - -Learn more from the following resources: - -- [@article@Understanding AI Models: Base Language Learning Models vs. 
Instruction Tuned Language Learning Models - Olivier Mills](https://oliviermills.com/articles/understanding-ai-models-base-language-learning-models-vs-instruction-tuned-language-learning-models) -- [@video@Why Are There So Many Foundation Models?](https://www.youtube.com/watch?v=QPQy7jUpmyA) -- [@video@How to Pick the Right AI Foundation Model](https://www.youtube.com/watch?v=pePAAGfh-IU) diff --git a/src/data/roadmaps/prompt-engineering/content/basic-llm/llm-vocabulary.md b/src/data/roadmaps/prompt-engineering/content/basic-llm/llm-vocabulary.md deleted file mode 100644 index 5c785f2a8..000000000 --- a/src/data/roadmaps/prompt-engineering/content/basic-llm/llm-vocabulary.md +++ /dev/null @@ -1,35 +0,0 @@ -# Vocabulary - -When working with LLMs, you will come across a lot of new terms. This section will help you understand the meaning of these terms and how they are used in the context of LLMs. - -- **Machine Learning (ML)** — ML is a field of study that focuses on algorithms that can learn from data. ML is a subfield of AI. - -- **"Model" vs. "AI" vs. "LLM"** — These terms are used somewhat interchangeably throughout this course, but they do not always mean the same thing. LLMs are a type of AI, as noted above, but not all AIs are LLMs. When we mentioned models in this course, we are referring to AI models. As such, in this course, you can consider the terms "model" and "AI" to be interchangeable. - -- **LLM** — Large language model. A large language model is a type of artificial intelligence that can understand and generate human-like text based on the input it receives. These models have been trained on vast amounts of text data and can perform a wide range of language-related tasks, such as answering questions, carrying out conversations, summarizing text, translating languages, and much more. - -- **MLM** — Masked language model. A masked language model is a type of language model that is trained to predict the next word in a sequence of words. 
It is typically trained on a large corpus of text data and can be used for a variety of tasks, such as machine translation, sentiment analysis, summarization, and more. - -- **NLP** — Natural language processing. Natural language processing is a branch of artificial intelligence that deals with the interaction between computers and human languages. It is used to analyze, understand, and generate human language. - -- **Label** — Labels are just possibilities for the classification of a given text. For example, if you have a text that says "I love you", then the labels could be "positive", "negative", or "neutral". The model will try to predict which label is most likely to be correct based on the input text. - -- **Label Space** — The label space is the set of all possible labels that can be assigned to a given text. For example, if you have a text that says "I love you", then the label space could be "positive", "negative", or "neutral". - -- **Label Distribution** — The label distribution is the probability distribution over the label space. For example, if you have a text that says "I love you", then the label distribution could be \[0.8, 0.1, 0.1]. This means that the model thinks there is an 80% chance that the text is positive, a 10% chance that it is negative, and a 10% chance that it is neutral. - -- **Sentiment Analysis** — Sentiment analysis is the process of determining the emotional tone behind a series of words, used to gain an understanding of the attitudes, opinions and emotions expressed within an online mention. Sentiment analysis is also known as opinion mining, deriving the opinion or attitude of a speaker. - -- **Verbalizer** — In the classification setting, verbalizers are mappings from labels to words in a language model's vocabulary. For example, consider performing sentiment classification with the following prompt: - ``` - Tweet: "I love hotpockets" - What is the sentiment of this tweet? Say 'pos' or 'neg'. 
- ``` - Here, the verbalizer is the mapping from the conceptual labels of `positive` and `negative` to the tokens `pos` and `neg`. - -- **Reinforcement Learning from Human Feedback (RLHF)** — RLHF is a technique for training a model to perform a task by providing it with human feedback. The model is trained to maximize the amount of positive feedback it receives from humans, while minimizing the amount of negative feedback it receives. - -References and further learning: - -- [@article@LLM Vocabulary](https://learnprompting.org/docs/vocabulary) -- [@feed@Explore top posts about LLM](https://app.daily.dev/tags/llm?ref=roadmapsh) diff --git a/src/data/roadmaps/prompt-engineering/content/basic-llm/what-are-llms.md b/src/data/roadmaps/prompt-engineering/content/basic-llm/what-are-llms.md deleted file mode 100644 index 2f9d22eca..000000000 --- a/src/data/roadmaps/prompt-engineering/content/basic-llm/what-are-llms.md +++ /dev/null @@ -1,17 +0,0 @@ -# What are LLMs? - -LLMs, or Large Language Models, are advanced Artificial Intelligence models specifically designed for understanding and generating human language. These models are typically based on deep learning architectures, such as Transformers, and are trained on massive amounts of text data from various sources to acquire a deep understanding of the nuances and complexities of language. - -LLMs have the ability to achieve state-of-the-art performance in multiple Natural Language Processing (NLP) tasks, such as machine translation, sentiment analysis, summarization, and more. They can also generate coherent and contextually relevant text based on given input, making them highly useful for applications like chatbots, question-answering systems, and content generation. - -As an example, OpenAI's GPT-3 is a prominent LLM that has gained significant attention due to its capability to generate high-quality text and perform a variety of language tasks with minimal fine-tuning. 
- -Learn more from the following resources: - -- [@roadmap.sh@Introduction to LLMs](https://roadmap.sh/guides/introduction-to-llms) -- [@article@Large language model](https://en.wikipedia.org/wiki/Large_language_model) -- [@video@Intro to Large Language Models](https://www.youtube.com/watch?v=zjkBMFhNj_g) -- [@video@Large Language Model Operations (LLMOps) Explained](https://www.youtube.com/watch?v=cvPEiPt7HXo) -- [@video@How Large Language Models Work](https://youtu.be/5sLYAQS9sWQ) -- [@feed@Explore top posts about LLM](https://app.daily.dev/tags/llm?ref=roadmapsh) - diff --git a/src/data/roadmaps/prompt-engineering/content/calibrating-llms@P5nDyQbME53DOEfSkcY6I.md b/src/data/roadmaps/prompt-engineering/content/calibrating-llms@P5nDyQbME53DOEfSkcY6I.md new file mode 100644 index 000000000..1d546c665 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/calibrating-llms@P5nDyQbME53DOEfSkcY6I.md @@ -0,0 +1 @@ +# Calibrating LLMs \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/chain-of-thought-cot-prompting@weRaJxEplhKDyFWSMeoyI.md b/src/data/roadmaps/prompt-engineering/content/chain-of-thought-cot-prompting@weRaJxEplhKDyFWSMeoyI.md new file mode 100644 index 000000000..f8ac8ad8c --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/chain-of-thought-cot-prompting@weRaJxEplhKDyFWSMeoyI.md @@ -0,0 +1 @@ +# Chain of Thought (CoT) Prompting \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/context-window@b-Xtkv6rt8QgzJXSShOX-.md b/src/data/roadmaps/prompt-engineering/content/context-window@b-Xtkv6rt8QgzJXSShOX-.md new file mode 100644 index 000000000..d2dbf8ba6 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/context-window@b-Xtkv6rt8QgzJXSShOX-.md @@ -0,0 +1 @@ +# Context Window \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/contextual-prompting@5TNK1KcSzh9GTKiEJnM-y.md 
b/src/data/roadmaps/prompt-engineering/content/contextual-prompting@5TNK1KcSzh9GTKiEJnM-y.md new file mode 100644 index 000000000..a33ef1905 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/contextual-prompting@5TNK1KcSzh9GTKiEJnM-y.md @@ -0,0 +1 @@ +# Contextual Prompting \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/fine-tuning-vs-prompt-engg@Ke5GT163k_ek9SzbcbBGE.md b/src/data/roadmaps/prompt-engineering/content/fine-tuning-vs-prompt-engg@Ke5GT163k_ek9SzbcbBGE.md new file mode 100644 index 000000000..9ea09cdc2 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/fine-tuning-vs-prompt-engg@Ke5GT163k_ek9SzbcbBGE.md @@ -0,0 +1 @@ +# Fine-Tuning vs Prompt Engg. \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/frequency-penalty@YIVNjkmTOY61VmL0md9Pj.md b/src/data/roadmaps/prompt-engineering/content/frequency-penalty@YIVNjkmTOY61VmL0md9Pj.md new file mode 100644 index 000000000..149b0e10f --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/frequency-penalty@YIVNjkmTOY61VmL0md9Pj.md @@ -0,0 +1 @@ +# Frequency Penalty \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/google@o-6UKLZ6oCRbAKgRjH2uI.md b/src/data/roadmaps/prompt-engineering/content/google@o-6UKLZ6oCRbAKgRjH2uI.md new file mode 100644 index 000000000..fd0d223e1 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/google@o-6UKLZ6oCRbAKgRjH2uI.md @@ -0,0 +1 @@ +# Google \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/hallucination@SWDa3Su3VS815WQbvvNsa.md b/src/data/roadmaps/prompt-engineering/content/hallucination@SWDa3Su3VS815WQbvvNsa.md new file mode 100644 index 000000000..0d348c082 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/hallucination@SWDa3Su3VS815WQbvvNsa.md @@ -0,0 +1 @@ +# Hallucination \ No newline at end of file diff --git 
a/src/data/roadmaps/prompt-engineering/content/image-prompting/deformed-generations.md b/src/data/roadmaps/prompt-engineering/content/image-prompting/deformed-generations.md deleted file mode 100644 index 50b190696..000000000 --- a/src/data/roadmaps/prompt-engineering/content/image-prompting/deformed-generations.md +++ /dev/null @@ -1,7 +0,0 @@ -# Fix Deformed Generations - -Deformed generations in image prompting refer to outputs from generative models that do not meet the intended aesthetic or structural quality, particularly when involving human body parts like hands and feet. This issue can often be mitigated using negative prompts, which instruct the AI to de-emphasize certain undesired features. Key strategies to address this problem include refining prompt design, using negative prompts and weighted terms, selecting appropriate models, implementing iterative processes and feedback loops, keeping models updated, and applying post-processing techniques. While current models may still struggle with certain deformations, employing these strategies effectively can significantly enhance image quality. As generative models continue to evolve, the need for such techniques is expected to decrease, but they remain essential for anyone working with AI-generated imagery to ensure outputs meet desired standards of quality and realism. 
- -Learn more from the following resources: -- [@article@How to Fix Hands in Stable Diffusion: A Step-by-Step Guide - AI Prompt Directory](https://www.aipromptsdirectory.com/how-to-fix-hands-in-stable-diffusion-a-step-by-step-guide/) -- [@guide@Guide to Negative Prompts in Stable Diffusion | getimg.ai](https://getimg.ai/guides/guide-to-negative-prompts-in-stable-diffusion) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/image-prompting/index.md b/src/data/roadmaps/prompt-engineering/content/image-prompting/index.md deleted file mode 100644 index 1e9b99240..000000000 --- a/src/data/roadmaps/prompt-engineering/content/image-prompting/index.md +++ /dev/null @@ -1,29 +0,0 @@ -# Image Prompting - -## Image Prompting - -Image prompting is a technique used in the process of developing and refining prompts to work with AI models, particularly those that are designed for processing and generating descriptions, captions, or other textual responses based on visual input. In this section, we will discuss the essentials of image prompting and provide some suggestions to create better image prompts. - -### Key Concepts - -When working with AI models that process images, it is crucial to understand that the model's performance often depends on the quality and relevance of the image prompt. The following key concepts will help you understand and craft effective image prompts: - -1. **Descriptiveness**: Image prompts should encourage the AI model to generate a comprehensive and detailed response. For example, instead of simply asking for the scene description, you can ask the AI to describe the scene, including specific elements and their relationships. - -2. **Context**: Image prompts should provide enough context to help the AI produce appropriate responses. You can include details like the setting or the environment when crafting the prompt. - -3. **Precision**: Specify the level of detail you want in the response. 
The image prompt should be designed to elicit precise, relevant, and focused responses from the AI. Avoid ambiguous or overly general instructions. - -### Tips for Creating Effective Image Prompts - -To create compelling image prompts, consider the following tips: - -1. **Start with a clear goal**: Define the information you want the AI to extract from the image and design the prompt accordingly. - -2. **Adapt to the image content**: Understanding the image's subject and context will help you design prompts that yield better results. Make sure the prompts address the key aspects of the image. - -3. **Test and iterate**: Experimentation is crucial to create better image prompts. Test your prompts with various images and fine-tune them based on the AI's responses. - -4. **Balance simplicity and complexity**: While it's essential to provide clear instructions to the AI, avoid making the prompt overly complicated. Aim for a balance between simplicity and detail to elicit accurate and useful responses. - -With these principles in mind, you'll be well on your way to mastering image prompting, harnessing the power of AI to generate valuable insights and engaging responses based on visual input. Happy prompting! \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/image-prompting/quality-boosters.md b/src/data/roadmaps/prompt-engineering/content/image-prompting/quality-boosters.md deleted file mode 100644 index a20fcd97b..000000000 --- a/src/data/roadmaps/prompt-engineering/content/image-prompting/quality-boosters.md +++ /dev/null @@ -1,9 +0,0 @@ -# Quality Boosters - -Quality boosters in AI image generation are techniques and tools used to enhance the visual output of generated images. These include advanced model architectures, use of natural and descriptive language in prompts, resolution specifications, AI image enhancers, style modifiers, iterative feedback, and experimentation with prompt lengths. 
Advanced models like Stable Diffusion 3.5 offer improved customization, while incorporating descriptive terms and specifying high resolutions can enhance detail and appeal. Currennt AI image tools as can upscale and refine images. Adding style-related terms and implementing feedback loops for refinement can lead to more personalized results. By employing these strategies, users can significantly improve the quality of AI-generated images, achieving more detailed, aesthetically pleasing, and tailored outputs. - -Learn more from the following resources: - -- [@guide@Quality Boosters | Learnprompting.org]( https://learnprompting.org/docs/image_prompting/quality_boosters) -- [@article@A Beginner’s Guide to Prompt Design for Text-to-Image Generative Models |Medium.com](https://towardsdatascience.com/a-beginners-guide-to-prompt-design-for-text-to-image-generative-models-8242e1361580) -- [@guide@Enhancing Text-to-Image Prompts: Techniques and Best Practices - Steh Blog]( https://steh.github.io/informationsecurity/text-image-prompts/ ) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/image-prompting/style-modifiers.md b/src/data/roadmaps/prompt-engineering/content/image-prompting/style-modifiers.md deleted file mode 100644 index e4ee580fd..000000000 --- a/src/data/roadmaps/prompt-engineering/content/image-prompting/style-modifiers.md +++ /dev/null @@ -1,9 +0,0 @@ -# Style Modifiers - -Style modifiers are essential tools in AI image prompting that allow users to specify the style of generated images. These descriptors can include elements such as art styles, materials, techniques, and even moods or historical periods. By combining multiple style modifiers, users can achieve highly specific and creative outputs. Recent advancements have expanded the categories of style modifiers and improved AI models' ability to interpret them. 
Additionally, these modifiers are now supported by a wider range of AI tools, enhancing their applicability in various creative fields. - -Learn more from the following resources: - -- [@guide@Style Modifiers | LearnPrompting.org ](https://learnprompting.org/docs/image_prompting/style_modifiers) -- [@article@A Beginner’s Guide to Prompt Design for Text-to-Image Generative Models | Medium.com]( https://towardsdatascience.com/a-beginners-guide-to-prompt-design-for-text-to-image-generative-models-8242e1361580 ) -- [@guide@Enhancing Text-to-Image Prompts: Techniques and Best Practices | Steh Blog]( https://steh.github.io/informationsecurity/text-image-prompts/ ) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/image-prompting/weighted-terms.md b/src/data/roadmaps/prompt-engineering/content/image-prompting/weighted-terms.md deleted file mode 100644 index 11ce905f5..000000000 --- a/src/data/roadmaps/prompt-engineering/content/image-prompting/weighted-terms.md +++ /dev/null @@ -1,10 +0,0 @@ -# Weighted Terms - -Weighted terms in image prompting are a technique used to control the output of AI-generated images by _emphasizing_ or _de-emphasizing_ certain words or phrases within a prompt. This method influences the model's focus and the resulting image. Models like Stable Diffusion and Midjourney allow users to assign weights to terms, which can significantly alter the generated image. For example, using (`mountain:1.5`) would emphasize the mountain aspect in an image prompt. Positive weights increase emphasis on desired elements, while negative weights de-emphasize or exclude elements. The placement of terms in the prompt also affects their influence, with words at the beginning generally having more impact. Adjusting weights often requires multiple attempts to achieve the desired result, and overemphasis on certain terms may limit creativity and diversity in generated images. 
- -Learn more from the following resources: - - -- [@article@Weighted Terms | Learnprompting.org](https://learnprompting.org/docs/image_prompting/weighted_terms) -- [@article@Complete Prompting Guide | SeaArt Guide](https://docs.seaart.ai/guide-1/4-parameters/4-6-complete-prompting-guide) -- [@article@Understanding the Use of Parentheses in Prompt Weighting for Stable Diffusion | Tensor.Art](https://tensor.art/articles/736115871065484219) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/index.md b/src/data/roadmaps/prompt-engineering/content/index.md deleted file mode 100644 index 4e768b56d..000000000 --- a/src/data/roadmaps/prompt-engineering/content/index.md +++ /dev/null @@ -1 +0,0 @@ -# \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/introduction@jrH1qE6EnFXL4fTyYU8gR.md b/src/data/roadmaps/prompt-engineering/content/introduction@jrH1qE6EnFXL4fTyYU8gR.md new file mode 100644 index 000000000..f6ecaa676 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/introduction@jrH1qE6EnFXL4fTyYU8gR.md @@ -0,0 +1 @@ +# Introduction \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/llm-pitfalls/bias.md b/src/data/roadmaps/prompt-engineering/content/llm-pitfalls/bias.md deleted file mode 100644 index 3e5ba0875..000000000 --- a/src/data/roadmaps/prompt-engineering/content/llm-pitfalls/bias.md +++ /dev/null @@ -1,11 +0,0 @@ -# Bias - -Bias in Large Language Models (LLMs) remains a significant challenge, with models often generating stereotypical or discriminatory responses despite advancements in mitigation techniques. These biases can manifest in various forms, including gender, racial, and cultural prejudices, potentially leading to underfitting or overfitting in model outputs. 
Recent studies have highlighted persistent biases in LLM-generated content, emphasizing the need for caution when deploying these models in consumer-facing applications or research settings. Efforts to address this issue include developing diverse training datasets, implementing regulatory frameworks, and creating new evaluation tools. However, the challenge remains substantial as LLMs continue to influence societal perceptions. Developers and users must be aware of these pitfalls to avoid reputational damage and unintended negative impacts on individuals or communities. - -Learn more from the following resources: - -- [@guides@Biases in Prompts: Learn how to tackle them](https://mindfulengineer.ai/understanding-biases-in-prompts/) -- [@guides@Bias in AI: tackling the issues through regulations and standards](https://publicpolicy.ie/papers/bias-in-ai-tackling-the-issues-through-regulations-and-standards/) -- [@article@What Is AI Bias?](https://www.ibm.com/topics/ai-bias) -- [@article@What Is Algorithmic Bias?](https://www.ibm.com/think/topics/algorithmic-bias) -- [@article@AI Bias Examples](https://www.ibm.com/think/topics/shedding-light-on-ai-bias-with-real-world-examples) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/llm-pitfalls/citing-sources.md b/src/data/roadmaps/prompt-engineering/content/llm-pitfalls/citing-sources.md deleted file mode 100644 index c72056e32..000000000 --- a/src/data/roadmaps/prompt-engineering/content/llm-pitfalls/citing-sources.md +++ /dev/null @@ -1,8 +0,0 @@ -# Citing Sources - -As advancements have been made in the ability of Large Language Models (LLMs) to cite sources — particularly through realtime API access, search-augmented generation and specialized training — significant limitations persist. LLMs continue to struggle with hallucinations, generating inaccurate or fictitious citation. 
Many LLMs lack real-time API access, which hampers their ability to provide up-to-date information, or are limited by their knowledge cutoff dates. They sometimes cannot independently verify sources or fully grasp the contextual relevance of citations, raising concerns regarding plagiarism and intellectual property. To address these challenges, ongoing efforts focus on improving realtime retrieval (RAG) methods, enhancing training, and integrating human oversight to ensure accuracy in citations. - -Learn more from the following resources: - -- [@guides@Why Don’t Large Language Models Share URL References in Their Responses](https://medium.com/@gcentulani/why-dont-large-language-models-share-url-references-in-their-responses-bf427e513861) -- [@article@Effective large language model adaptation for improved grounding](https://research.google/blog/effective-large-language-model-adaptation-for-improved-grounding/) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/llm-pitfalls/hallucinations.md b/src/data/roadmaps/prompt-engineering/content/llm-pitfalls/hallucinations.md deleted file mode 100644 index f323dffef..000000000 --- a/src/data/roadmaps/prompt-engineering/content/llm-pitfalls/hallucinations.md +++ /dev/null @@ -1,14 +0,0 @@ -# Hallucinations - -Large Language Model (LLM) hallucinations can be broadly categorized into faithfulness and factuality issues. **Faithfulness hallucinations** occur when the model's output deviates from provided sources or context, including problems with source-reference divergence, context retrieval, dialogue history misinterpretation, and erroneous summarization. **Factuality hallucinations**, on the other hand, involve the generation of incorrect or unsupported information, encompassing factual inaccuracies, entity errors, overclaims, unverifiable statements, nonsensical responses, contradictions, and fabricated data. 
- -These hallucinations stem from various causes such as training data issues, model limitations, prompt-related problems, and overfitting. To mitigate these challenges, strategies like Retrieval-Augmented Generation (RAG), improved training data, rigorous evaluation, clear user communication, advanced prompt engineering, model fine-tuning, output filtering, and multi-model approaches are being employed. As the field progresses, understanding and addressing these hallucination types remains crucial for enhancing the reliability and trustworthiness of LLM-generated content. - -Learn more from the following resources: - -- [@article@What are AI hallucinations?](https://www.ibm.com/topics/ai-hallucinations) -- [@article@Hallucination (artificial intelligence) - Wikipedia](https://en.wikipedia.org/wiki/Hallucination_(artificial_intelligence)) -- [@video@Why Large Language Models Hallucinate - IBM](https://www.youtube.com/watch?v=cfqtFvWOfg0) -- [@video@Risks of Large Language Models - IBM](https://www.youtube.com/watch?v=r4kButlDLUc) -- [@guides@Key Strategies to Minimize LLM Hallucinations](https://www.turing.com/resources/minimize-llm-hallucinations-strategy) - diff --git a/src/data/roadmaps/prompt-engineering/content/llm-pitfalls/index.md b/src/data/roadmaps/prompt-engineering/content/llm-pitfalls/index.md deleted file mode 100644 index 3835b9a2f..000000000 --- a/src/data/roadmaps/prompt-engineering/content/llm-pitfalls/index.md +++ /dev/null @@ -1,76 +0,0 @@ -# Pitfalls of LLMs - -LLMs are extremely powerful. There are many pitfalls, safety challenges and risks that you should be aware of when using them. - -### Language Translation - -There are several risks associated with LLMs in language translation. 
- -Inaccurate translations -- Contextual misinterpretation -- Biased translations -- Deepfakes -- Privacy and data security -- Legal and regulatory compliance - -### Text Generation - -Text generation is a powerful capability of LLMs but also introduces certain risks and challenges. - -- Misinformation and fake news -- Bias amplification -- Offensive or inappropriate content -- Plagiarism and copyright infringement -- Lack of transparency -- Privacy breaches - -### Question Answering - -LLMs present several risks in the domain of question answering. - -- Hallucination -- Outdated information -- Bias -- Harmful answers -- Lack of contextual understanding -- Privacy and security concerns -- Lack of transparency and explainability - -### Text summarization - -Text summarization is a powerful application of LLMs but also introduces certain risks and challenges. - -- Information loss -- Bias amplification -- Contextual misinterpretation - -### Sentiment analysis - -Sentiment analysis, the process of determining a piece of text’s sentiment or emotional tone, is an application where LLMs are frequently employed. - -- Biased sentiment analysis -- Cultural and contextual nuances -- Limited domain understanding -- Misinterpretation of negation and ambiguity -- Overgeneralization and lack of individual variation - -### Code Assistance - -Code assistance and generation is an area where LLMs have shown promising capabilities. - -- Security vulnerabilities -- Performance and efficiency challenges -- Quality and reliability concerns -- Insufficient understanding of business or domain context -- Intellectual property concerns - -Read more from [Risks of Large Language Models: A comprehensive guide](https://www.deepchecks.com/risks-of-large-language-models/). 
- -Learn more from the following resources: - -- [@video@Risks of Large Language Models - IBM](https://www.youtube.com/watch?v=r4kButlDLUc) -- [@article@Risks of Large Language Models: A comprehensive guide](https://www.deepchecks.com/risks-of-large-language-models/) -- [@article@Limitations of LLMs: Bias, Hallucinations, and More](https://learnprompting.org/docs/basics/pitfalls) -- [@guides@Risks & Misuses | Prompt Engineering Guide](https://www.promptingguide.ai/risks) -- [@guides@OWASP Top 10 for LLM & Generative AI Security](https://genai.owasp.org/llm-top-10/) -- [@guides@LLM Security Guide - Understanding the Risks of Prompt Injections and Other Attacks on Large Language Models ](https://www.mlopsaudits.com/blog/llm-security-guide-understanding-the-risks-of-prompt-injections-and-other-attacks-on-large-language-models) diff --git a/src/data/roadmaps/prompt-engineering/content/llm-pitfalls/math.md b/src/data/roadmaps/prompt-engineering/content/llm-pitfalls/math.md deleted file mode 100644 index 793fb08c0..000000000 --- a/src/data/roadmaps/prompt-engineering/content/llm-pitfalls/math.md +++ /dev/null @@ -1,7 +0,0 @@ -# Math - -LLMs struggle with math. While they may have improved at solving simple math problems, they often come up short when solving more complex ones, especially when minor semantic variations are introduced. This is particularly relevant in terms of mathematical reasoning. Despite advancements, they often fail at solving simple math problems and are unable to handle more complex ones effectively. Studies show that LLMs rely heavily on pattern recognition rather than genuine logical reasoning, leading to significant performance drops when faced with minor changes in problem wording or irrelevant information. This highlights a critical limitation in their reasoning capabilities. 
- -Learn more from the following resources: - -- [@article@Apple Says AI’s Math Skills Fall Short](https://www.pymnts.com/artificial-intelligence-2/2024/apple-says-ais-math-skills-fall-short/) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/llm-pitfalls/prompt-hacking.md b/src/data/roadmaps/prompt-engineering/content/llm-pitfalls/prompt-hacking.md deleted file mode 100644 index 1c142d22e..000000000 --- a/src/data/roadmaps/prompt-engineering/content/llm-pitfalls/prompt-hacking.md +++ /dev/null @@ -1,11 +0,0 @@ -# Prompt Hacking - -Prompt hacking is a form of adversarial prompting where language models are manipulated to generate outputs that violate safety guidelines or are off-topic. Common techniques include manipulating keywords, exploiting grammar and negations, and using leading questions. To combat this, developers implement safety mechanisms such as content filters, continual analysis, and carefully designed prompt templates. As language models become more integrated into digital infrastructure, concerns about prompt injection, data leakage, and potential misuse have grown. In response, evolving defense strategies like prompt shields, enhanced input validation, and fine-tuning for adversarial detection are being developed. Continuous monitoring and improvement of these safety measures are crucial to ensure responsible model behaviour and output alignment with desired guidelines. 
- -Learn more from the following resources: - -- [@article@Prompt Hacking](https://learnprompting.org/docs/category/-prompt-hacking) -- [@article@LLM Security Guide - Understanding the Risks of Prompt Injections and Other Attacks on Large Language Models ](https://www.mlopsaudits.com/blog/llm-security-guide-understanding-the-risks-of-prompt-injections-and-other-attacks-on-large-language-models) -- [@guides@OWASP Top 10 for LLM & Generative AI Security](https://genai.owasp.org/llm-top-10/) -- [@video@Explained: The OWASP Top 10 for Large Language Model Applications](https://www.youtube.com/watch?v=cYuesqIKf9A) -- [@video@Artificial Intelligence: The new attack surface](https://www.youtube.com/watch?v=_9x-mAHGgC4) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/llm-self-evaluation@CvV3GIvQhsTvE-TQjTpIQ.md b/src/data/roadmaps/prompt-engineering/content/llm-self-evaluation@CvV3GIvQhsTvE-TQjTpIQ.md new file mode 100644 index 000000000..210fbacbd --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/llm-self-evaluation@CvV3GIvQhsTvE-TQjTpIQ.md @@ -0,0 +1 @@ +# LLM Self Evaluation \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/llm-settings/index.md b/src/data/roadmaps/prompt-engineering/content/llm-settings/index.md deleted file mode 100644 index 82f73fd81..000000000 --- a/src/data/roadmaps/prompt-engineering/content/llm-settings/index.md +++ /dev/null @@ -1,26 +0,0 @@ -# LLM Settings - -LLM (Language Model) settings play a crucial role in prompt engineering as they directly influence the behavior and output of the language model. In this section, we will discuss some of the important LLM settings that you need to consider while designing prompts. - -## 1. Temperature - -Temperature is a hyperparameter that controls the randomness of the output generated by the language model. 
A higher temperature will result in more diverse and creative responses, while a lower temperature will produce more focused and deterministic responses. - -- **High Temperature (e.g., 1.0):** More random and creative outputs, higher chances of deviation from the topic, and potentially lower relevance. -- **Low Temperature (e.g., 0.2):** More deterministic outputs, focused on the provided input, and higher relevance. - -## 2. Max Tokens - -Max tokens determine the length of the output generated by the model. By controlling the number of tokens in the response, you can influence the verbosity of the language model. - -- **Higher Max Tokens:** Longer responses, more details, and higher chances of going off-topic. -- **Lower Max Tokens:** Shorter responses, more concise, but might cut off important information. - -## 3. Top-K Sampling - -Top-K sampling is an approach to limit the number of predicted words that the language model can consider. By specifying a smaller K value, you can restrict the output to be focused and prevent the model from generating unrelated information. - -- **High K Value:** Model considers more word options and might generate diverse content, but with a higher risk of going off-topic. -- **Low K Value:** Model has limited word options, leading to focused and related content. - -These LLM settings give you control over the output of the language model, helping you steer the responses according to your requirements. Understanding the balance between these settings can improve the effectiveness of your prompt engineering efforts. 
\ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/llm-settings/other-hyper-params.md b/src/data/roadmaps/prompt-engineering/content/llm-settings/other-hyper-params.md deleted file mode 100644 index 23f3b4e7e..000000000 --- a/src/data/roadmaps/prompt-engineering/content/llm-settings/other-hyper-params.md +++ /dev/null @@ -1,2 +0,0 @@ -# Other Hyperparameters - diff --git a/src/data/roadmaps/prompt-engineering/content/llm-settings/temperature.md b/src/data/roadmaps/prompt-engineering/content/llm-settings/temperature.md deleted file mode 100644 index 20cfac1a5..000000000 --- a/src/data/roadmaps/prompt-engineering/content/llm-settings/temperature.md +++ /dev/null @@ -1,17 +0,0 @@ -# Temperature - -Temperature is an important setting in the Language Models (LMs), specifically for the fine-tuning process. It refers to the "temperature" parameter in the softmax function of the language model. Adjusting the temperature can influence the randomness or conservativeness of the model's output. - -## Role of Temperature - -The temperature controls the model's level of creativity and boldness in generating text. A lower temperature value makes the model more conservative, sticking closely to the patterns it has learned from the training data. Higher temperature values encourage the model to explore riskier solutions by allowing less likely tokens to be more probable. - -## Practical Uses - -When fine-tuning an LM, you can regulate its behavior by adjusting the temperature: - -- **Lower temperature values** (e.g., 0.2 or 0.5): The model will be more focused on phrases and word sequences that it learned from the training data. The output will be less diverse, but may lack novelty or creativity. Suitable for tasks where conservativeness is important, such as text summarization or translation. - -- **Higher temperature values** (e.g., 1.0 or 2.0): The model will generate more creative outputs with innovative combinations of words. 
However, it may produce less coherent or contextually improper text. Useful for tasks where exploration and distinctiveness are required, like creative writing or brainstorming. - -Experimenting with various temperature values can lead to finding the optimal balance between creativity and coherence, depending on the specific task and desired output. \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/llm-settings/top-p.md b/src/data/roadmaps/prompt-engineering/content/llm-settings/top-p.md deleted file mode 100644 index 6e460f057..000000000 --- a/src/data/roadmaps/prompt-engineering/content/llm-settings/top-p.md +++ /dev/null @@ -1,20 +0,0 @@ -# Top P Sampling - -Top P, also known as "nucleus sampling," is a method that provides a more dynamic way to control the randomness of a model's generated output. It improves the trade-off between quality and diversity in text generation. - -## How Top P Works? - -Instead of picking the top K tokens with the highest probability like in Top K sampling, Top P sampling picks a number of tokens whose cumulative probability adds up to the given value of P. P is a probability mass with a range between 0 and 1. This means that the number of tokens picked will vary, automatically adapting to the distribution in a more granular way. - -## Advantages of Top P - -1. **More diverse and coherent outputs**: Top P sampling strikes a balance between overly conservative and highly random text. This creates more diverse and coherent outputs compared to Top K sampling. -2. **Adaptive threshold**: The dynamic nature of Top P sampling allows it to adapt to the token probability distribution, unlike Top K sampling which requires manual tuning of K. -3. **Prevents OOV tokens**: By gathering the tokens based on a cumulative probability threshold, Top P sampling effectively prevents selecting out-of-vocabulary (OOV) tokens. 
- -## Adjusting Top P Value - -- **Lower values**: Decreasing the value of P will result in more focused outputs, potentially at the expense of diversity. -- **Higher values**: Increasing the value of P will encourage the model to explore more diverse responses, possibly at the cost of coherence. - -In practice, a commonly used Top P value is 0.9, but you should experiment with different values for P depending on your specific use-case and desired balance between diversity and coherence. \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/llm@pamV5Z8DRKk2ioZbg6QVK.md b/src/data/roadmaps/prompt-engineering/content/llm@pamV5Z8DRKk2ioZbg6QVK.md new file mode 100644 index 000000000..13ae8d124 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/llm@pamV5Z8DRKk2ioZbg6QVK.md @@ -0,0 +1 @@ +# LLM \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/llms-and-how-they-work@74JxgfJ_1qmVNZ_QRp9Ne.md b/src/data/roadmaps/prompt-engineering/content/llms-and-how-they-work@74JxgfJ_1qmVNZ_QRp9Ne.md new file mode 100644 index 000000000..f5e38292f --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/llms-and-how-they-work@74JxgfJ_1qmVNZ_QRp9Ne.md @@ -0,0 +1 @@ +# LLMs and how they work? 
\ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/max-tokens@vK9Gf8dGu2UvvJJhhuHG9.md b/src/data/roadmaps/prompt-engineering/content/max-tokens@vK9Gf8dGu2UvvJJhhuHG9.md new file mode 100644 index 000000000..ca9ce5d9a --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/max-tokens@vK9Gf8dGu2UvvJJhhuHG9.md @@ -0,0 +1 @@ +# Max Tokens \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/meta@Td2YzDFT4LPGDw8JMmQSQ.md b/src/data/roadmaps/prompt-engineering/content/meta@Td2YzDFT4LPGDw8JMmQSQ.md new file mode 100644 index 000000000..dabc8eee5 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/meta@Td2YzDFT4LPGDw8JMmQSQ.md @@ -0,0 +1 @@ +# Meta \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/model-weights--parameters@yfsjW1eze8mWT0iHxv078.md b/src/data/roadmaps/prompt-engineering/content/model-weights--parameters@yfsjW1eze8mWT0iHxv078.md new file mode 100644 index 000000000..8df7a72ae --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/model-weights--parameters@yfsjW1eze8mWT0iHxv078.md @@ -0,0 +1 @@ +# Model Weights / Parameters \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/one-shot--few-shot-prompting@Iufv_LsgUNls-Alx_Btlh.md b/src/data/roadmaps/prompt-engineering/content/one-shot--few-shot-prompting@Iufv_LsgUNls-Alx_Btlh.md new file mode 100644 index 000000000..2c88ee178 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/one-shot--few-shot-prompting@Iufv_LsgUNls-Alx_Btlh.md @@ -0,0 +1 @@ +# One-Shot / Few-Shot Prompting \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/openai@Yb5cQiV2ETxPbBYCLOpt2.md b/src/data/roadmaps/prompt-engineering/content/openai@Yb5cQiV2ETxPbBYCLOpt2.md new file mode 100644 index 000000000..9357a775f --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/openai@Yb5cQiV2ETxPbBYCLOpt2.md @@ -0,0 +1 @@ +# 
OpenAI \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/output-control@wSf7Zr8ZYBuKWX0GQX6J3.md b/src/data/roadmaps/prompt-engineering/content/output-control@wSf7Zr8ZYBuKWX0GQX6J3.md new file mode 100644 index 000000000..e74592553 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/output-control@wSf7Zr8ZYBuKWX0GQX6J3.md @@ -0,0 +1 @@ +# Output Control \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/presence-penalty@WpO8V5caudySVehOcuDvK.md b/src/data/roadmaps/prompt-engineering/content/presence-penalty@WpO8V5caudySVehOcuDvK.md new file mode 100644 index 000000000..0a9e0c807 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/presence-penalty@WpO8V5caudySVehOcuDvK.md @@ -0,0 +1 @@ +# Presence Penalty \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompt-debiasing@0H2keZYD8iTNyBgmNVhto.md b/src/data/roadmaps/prompt-engineering/content/prompt-debiasing@0H2keZYD8iTNyBgmNVhto.md new file mode 100644 index 000000000..445b509cf --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/prompt-debiasing@0H2keZYD8iTNyBgmNVhto.md @@ -0,0 +1 @@ +# Prompt Debiasing \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompt-ensembling@HOqWHqAkxLX8f2ImSmZE7.md b/src/data/roadmaps/prompt-engineering/content/prompt-ensembling@HOqWHqAkxLX8f2ImSmZE7.md new file mode 100644 index 000000000..1012c651a --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/prompt-ensembling@HOqWHqAkxLX8f2ImSmZE7.md @@ -0,0 +1 @@ +# Prompt Ensembling \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompt-hacking/defensive-measures.md b/src/data/roadmaps/prompt-engineering/content/prompt-hacking/defensive-measures.md deleted file mode 100644 index b6c6ef411..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompt-hacking/defensive-measures.md +++ /dev/null @@ -1,8 
+0,0 @@ -# Defensive Measures - -Defensive measures protect AI models from prompt attacks. Techniques include input sanitization, model fine-tuning, and prompt engineering. These strategies aim to enhance AI system security, prevent unauthorized access, and maintain ethical output generation. - -Visit the following resources to learn more: - -- [@article@Defensive Measures](https://learnprompting.org/docs/prompt_hacking/defensive_measures/overview) -- [@opensource@Prompt Injection Defenses](https://github.com/tldrsec/prompt-injection-defenses?tab=readme-ov-file#prompt-injection-defenses) diff --git a/src/data/roadmaps/prompt-engineering/content/prompt-hacking/index.md b/src/data/roadmaps/prompt-engineering/content/prompt-hacking/index.md deleted file mode 100644 index 94f1c4e4b..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompt-hacking/index.md +++ /dev/null @@ -1,8 +0,0 @@ -# Prompt Hacking - -Prompt hacking refers to techniques used to manipulate or exploit AI language models by carefully crafting input prompts. This practice aims to bypass the model's intended constraints or elicit unintended responses. Common methods include injection attacks, where malicious instructions are embedded within seemingly innocent prompts, and prompt leaking, which attempts to extract sensitive information from the model's training data. 
- -Visit the following resources to learn more: - -- [@article@Prompt Hacking](https://learnprompting.org/docs/prompt_hacking/intro) -- [@feed@Explore top posts about Security](https://app.daily.dev/tags/security?ref=roadmapsh) diff --git a/src/data/roadmaps/prompt-engineering/content/prompt-hacking/jailbreaking.md b/src/data/roadmaps/prompt-engineering/content/prompt-hacking/jailbreaking.md deleted file mode 100644 index c7221d6f4..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompt-hacking/jailbreaking.md +++ /dev/null @@ -1,8 +0,0 @@ -# Jailbreaking - -Jailbreaking bypasses AI models' ethical constraints and safety measures. Attackers use carefully crafted prompts to manipulate models into generating harmful, biased, or inappropriate content, potentially leading to misuse of AI systems. - -Visit the following resources to learn more: - -- [@article@Jailbreaking](https://learnprompting.org/docs/prompt_hacking/jailbreaking) -- [@opensource@Jailbreaking](https://github.com/dair-ai/Prompt-Engineering-Guide/blob/main/guides/prompts-adversarial.md#jailbreaking) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompt-hacking/offensive-measures.md b/src/data/roadmaps/prompt-engineering/content/prompt-hacking/offensive-measures.md deleted file mode 100644 index 3ef580371..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompt-hacking/offensive-measures.md +++ /dev/null @@ -1,8 +0,0 @@ -# Offensive Measures - -Offensive measures in prompt hacking actively test AI systems for vulnerabilities. Researchers use techniques like adversarial prompts and model probing to identify weaknesses, enabling improved defenses and highlighting potential risks in deployed AI models. 
- -Visit the following resources to learn more: - -- [@article@Offensive Measures](https://learnprompting.org/docs/prompt_hacking/offensive_measures/overview) -- [@article@Definitions and Types](https://www.gyata.ai/prompt-engineering/offensive-measures) diff --git a/src/data/roadmaps/prompt-engineering/content/prompt-hacking/prompt-injection.md b/src/data/roadmaps/prompt-engineering/content/prompt-hacking/prompt-injection.md deleted file mode 100644 index cbc783904..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompt-hacking/prompt-injection.md +++ /dev/null @@ -1,8 +0,0 @@ -# Prompt Injection - -Prompt injection exploits vulnerabilities in AI systems by inserting malicious instructions into user inputs. Attackers manipulate the model's behavior, potentially bypassing safeguards or extracting sensitive information. This technique poses security risks for AI-powered applications. - -Visit the following resources to learn more: - -- [@article@Prompt Injection](https://learnprompting.org/docs/prompt_hacking/injection) -- [@article@IBM Article](https://www.ibm.com/topics/prompt-injection) diff --git a/src/data/roadmaps/prompt-engineering/content/prompt-hacking/prompt-leaking.md b/src/data/roadmaps/prompt-engineering/content/prompt-hacking/prompt-leaking.md deleted file mode 100644 index c89e26d16..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompt-hacking/prompt-leaking.md +++ /dev/null @@ -1,8 +0,0 @@ -# Prompt Leaking - -Prompt leaking occurs when attackers trick AI models into revealing sensitive information from their training data or system prompts. This technique exploits model vulnerabilities to extract confidential details, potentially compromising privacy and security of AI systems. 
- -Visit the following resources to learn more: - -- [@article@Prompt Leaking](https://learnprompting.org/docs/prompt_hacking/leaking) -- [@opensource@Adversarial Prompting - Leaking](https://github.com/dair-ai/Prompt-Engineering-Guide/blob/main/guides/prompts-adversarial.md#prompt-leaking) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompt-injection@6W_ONYREbXHwPigoDx1cW.md b/src/data/roadmaps/prompt-engineering/content/prompt-injection@6W_ONYREbXHwPigoDx1cW.md new file mode 100644 index 000000000..7227af039 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/prompt-injection@6W_ONYREbXHwPigoDx1cW.md @@ -0,0 +1 @@ +# Prompt Injection \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompting-introduction/basic-prompting.md b/src/data/roadmaps/prompt-engineering/content/prompting-introduction/basic-prompting.md deleted file mode 100644 index 7937c4e79..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompting-introduction/basic-prompting.md +++ /dev/null @@ -1,34 +0,0 @@ -# Basic Prompting - -All you need to instruct model to perform a task is a prompt. A prompt is a piece of text that you give to the model to perform a task. - -For example, if you want to summarize an article, you could simply write the prompt with the article text on the top and the prompt: - -``` -Long article text here ............. -.................................... - -Summarize the above article for me. -``` - -Or if you want to translate a sentence from English to French, you could simply write the prompt with the English sentence on the top and the prompt: - -``` -This is a sentence in English. - -Translate the above sentence to French. -``` - -Or if you want to generate a new text, you could simply write the prompt with the instructions and the model will give you the text. - -``` -Write me an introductory guide about Prompt Engineering. 
-``` - -However, using plain text as prompts i.e. without using any best practices you may not be able to fully utilise the power of LLMs. That's where "Prompt Engineering" or knowing the best practices for writing better prompts and getting the most out of LLMs comes in. - -- [@guides@Basics of Prompting | Prompt Engineering Guide](https://www.promptingguide.ai/introduction/basics) -- [@article@Prompting Basics](https://learnprompting.org/docs/basics/prompting) -- [@offical@Prompt engineering - OpenAI API](https://platform.openai.com/docs/guides/prompt-engineering) -- [@offical@Prompt engineering overview - Anthropic](https://docs.anthropic.com/en/docs/build-with-claude/prompt-engineering/overview) -- [@course@Introduction to Prompt Engineering (Playlist)](https://youtube.com/playlist?list=PLYio3GBcDKsPP2_zuxEp8eCulgFjI5a3g&si=n3Ot-tFECp4axL8L) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompting-introduction/index.md b/src/data/roadmaps/prompt-engineering/content/prompting-introduction/index.md deleted file mode 100644 index 4d7072753..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompting-introduction/index.md +++ /dev/null @@ -1,28 +0,0 @@ -# Introduction to Prompting - -Prompting is the process of giving a model a "prompt" or instruction for the task that you want it to perform. For example, if you have some English text that you may want to translate to French, you could give the following prompt: - -``` -Translate the text delimited by triple quotes from English to French: - -"""Hello, how are you?""" -``` - -The model will then generate the following output: - -``` -Bonjour, comment allez-vous? -``` - -In this example, we gave the model a prompt with instructions to perform a task. If you notice, we followed a special way to write our prompt. We could simply give it the following prompt and it would have still worked: - -``` -Translate the following to French: - -Hello, how are you? 
-``` - -But it's one of the best practices to be clear and use delimiters to separate the content in prompt from the instructions. You will learn more about it in the "Best Practices" nodes of the roadmap. - -- [@article@Basic Prompting - Learn Prompting](https://learnprompting.org/docs/basics/intro) -- [@guides@Basics of Prompting - Prompt Engineering Guide](https://www.promptingguide.ai/introduction/basics) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompting-introduction/need-for-prompting.md b/src/data/roadmaps/prompt-engineering/content/prompting-introduction/need-for-prompting.md deleted file mode 100644 index 380ebe40f..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompting-introduction/need-for-prompting.md +++ /dev/null @@ -1,30 +0,0 @@ -# Need for Prompt Engineering - -Prompts play a key role in the process of generating useful and accurate information from AI language models. Given below are some of the reasons why "Prompt Engineering" or learning how to write better prompts is important. - -## Guiding Model Behavior - -AI language models perform best when answering questions, assisting with tasks, or producing text in response to a specific query or command. Without prompts, the model would generate content aimlessly, without any context or purpose. A well-crafted prompt helps guide the model's behavior to produce useful and relevant results. - -## Improving Text Quality and Relevance - -Using prompts optimizes the output generated by the AI language model. A clear and concise prompt encourages the model to generate text that meets the required quality and relevance standards. Thus, the need for prompting lies in ensuring the content generated by the AI is of high caliber and closely matches the intent of the user. 
- -## Eliciting a Specific Type of Output - -Prompts can be engineered to elicit a specific type of output from the AI language model, whether it's summarizing a piece of text, suggesting alternate phrasings, creating an engaging storyline, analysing some sentiment or extracting data from some text. By crafting prompts that focus on the desired output, users can better harness the power and flexibility of AI language models. - -## Aligning AI and Human Intent - -One primary reason for implementing prompts is to align the AI-generated content with the human user's intent. Effective prompting can help minimize the AI's understanding gap and cater to individual users' preferences and needs. - -## Reducing Inaccuracies and Ambiguity - -Prompts can help reduce inaccuracies and ambiguities in the AI's responses. By providing a clear, concise, and complete prompt to the AI, users prevent the model from making unfounded assumptions or providing unclear information. - -In conclusion, the need for prompting stems from its role in guiding AI model behavior, improving text quality and relevance, eliciting a specific output, aligning AI and human intent, and reducing inaccuracies and ambiguity in generated content. By understanding and mastering the art of prompting, users can unlock the true potential of AI language models. - -- [@article@Prompting Basics](https://learnprompting.org/docs/basics/prompting) -- [@video@AI prompt engineering: A deep dive](https://youtu.be/T9aRN5JkmL8?si=3uW2BQuNHLcHjqTv) -- [@video@What is Prompt Tuning?](https://www.youtube.com/watch?v=yu27PWzJI_Y) -- [@guides@What is Prompt Engineering? 
A Detailed Guide For 2024](https://www.datacamp.com/blog/what-is-prompt-engineering-the-future-of-ai-communication) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/give-conditions.md b/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/give-conditions.md deleted file mode 100644 index 6fb5849ba..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/give-conditions.md +++ /dev/null @@ -1,23 +0,0 @@ -# Give Conditions - -Giving conditions and then asking the model to follow those conditions helps steer the model's responses toward specific behaviors or outcomes. - -For example, you might give the model some long recipe text and ask it to extract the steps from the recipe or to return something else if the no receipe found in the text. In this way, you are making the output conditional giving the model some additional context. - -``` -You will be provided with text delimited by triple quotes. -If it contains a sequence of instructions, \ -re-write those instructions in the following format: - -Step 1 - ... -Step 2 - … -… -Step N - … - -If the text does not contain a sequence of instructions, \ -then simply write \"No steps provided - - -"""INSERT YOUR RECIPE TEXT HERE""" -``` - diff --git a/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/give-examples.md b/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/give-examples.md deleted file mode 100644 index 1ec8e4593..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/give-examples.md +++ /dev/null @@ -1,42 +0,0 @@ -# Give Successful Examples - -In this technique, you give examples of successful behavior to the model and then ask it to continue the behavior. For example, you might give the model a few examples of successful chess moves and then ask it to continue the game. 
- -Here is an example of a prompt that uses this technique: - -``` -Read the examples carefully and use them as a basis for your responses. - -Input: Banana -Output: Fruit - -Input: Apple -Output: Fruit - -Input: Carrot -Output: Vegetable - -Given the provided examples, generate the appropriate response for the following inputs: - -- Turnip -- Orange -- Pear -- Potato -- Cucumber -- Celery -- Broccoli -- Cauliflower -``` - -The output of this prompt is: - -``` -- Turnip: Vegetable -- Orange: Fruit -- Pear: Fruit -- Potato: Vegetable -- Cucumber: Vegetable -- Celery: Vegetable -- Broccoli: Vegetable -- Cauliflower: Vegetable -``` \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/include-steps.md b/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/include-steps.md deleted file mode 100644 index 3be303aa6..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/include-steps.md +++ /dev/null @@ -1,65 +0,0 @@ -# Include Steps - -There are times when asking the model for something directly might not result in the best output. In that case, you may want to try breaking down the steps in the same manner that you will perform the action yourself. - -For example, let's say that you have a large story written in english and you want to get the names and the number of names appeared in the story. But you want the names to be written in french. Now, there are two ways to write this prompt i.e. either asking directly for this e.g. - -``` -Give me a JSON object with following keys: `nouns_count` number of nouns appeared in story and `nouns_french` which is an array of nouns in french. The story is delimited by """. - -"""In a charming village, siblings Jack and Jill set out on a quest to fetch water from a hilltop well. As they climbed, singing joyfully, misfortune struck—Jack tripped on a stone and tumbled down the hill, with Jill following suit. 
Though slightly battered, the pair returned home to comforting embraces. Despite the mishap, their adventurous spirits remained undimmed, and they continued exploring with delight.""" -``` - -The output from this prompt is: - -``` -{ - "nouns_count": 10, - "nouns_french": [ - "village", - "siblings", - "Jack", - "Jill", - "quest", - "water", - "hilltop", - "well", - "stone", - "hill" - ] -} -``` - -As you can see the nouns are not in french. However, if we rewrite our prompt as follows: - -``` -Perform the following steps on the story delimited by """". - -Step 1. Translate it to French -Step 2. List each noun in the translation. -Step 3. Output the JSON object with `nouns_count` number of nouns in french story and `nouns_french` i.e. array of nouns appeared in translation. - -Output the JSON only, I am not interested in the rest of the steps. - -"""In a charming village, siblings Jack and Jill set out on a quest to fetch water from a hilltop well. As they climbed, singing joyfully, misfortune struck—Jack tripped on a stone and tumbled down the hill, with Jill following suit. Though slightly battered, the pair returned home to comforting embraces. 
Despite the mishap, their adventurous spirits remained undimmed, and they continued exploring with delight.""" -``` - -It will correctly output the following: - -``` -{ - "nouns_count": 10, - "nouns_french": [ - "village", - "frères", - "Jack", - "Jill", - "quête", - "eau", - "sommet de la colline", - "bien", - "pierre", - "colline" - ] -} -``` \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/index.md b/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/index.md deleted file mode 100644 index 7583a12e2..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/index.md +++ /dev/null @@ -1 +0,0 @@ -# Good prompts \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/iterate-refine.md b/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/iterate-refine.md deleted file mode 100644 index f653714dc..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/iterate-refine.md +++ /dev/null @@ -1,12 +0,0 @@ -# Iterate and Refine your prompts. - -Don't think of prompts as a one-and-done process. - -Iterate and refine is a crucial part of creating good prompts. It involves continually refining a prompt until it produces consistently accurate, relevant, and engaging responses. The process works as follows: - -1. **Draft the initial prompt**: Write a prompt that covers the topic you want the AI to discuss. At this stage, focus on making sure the prompt is clear and concise. -2. **Test the prompt**: Submit the prompt to the AI and assess the generated response. Note any issues or inaccuracies in the response. -3. **Revise the prompt**: Based on the observed issues, make adjustments to the prompt. It may involve rephrasing the question, adding more context or details, or specifying the format you want the answer in. -4. 
**Repeat the process**: Continue testing and refining the prompt until it consistently results in high-quality responses from the AI. - -Remember that sometimes you may need to go through several iterations before arriving at a prompt that works well. By consistently refining prompts and experimenting with different strategies, you'll be more effective at creating prompts that yield accurate and engaging answers from the AI. \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/structured-data.md b/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/structured-data.md deleted file mode 100644 index 2be2e3339..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/structured-data.md +++ /dev/null @@ -1,34 +0,0 @@ -# Structured Output - -When designing prompts for language models, it's often beneficial to request structured output formats such as JSON, XML, HTML, or similar formats. By asking for structured output, you can elicit specific and well-organized responses from the model, which can be particularly useful for tasks involving data processing, web scraping, or content generation. - -Here's how you can request structured output from language models: - -- **Specify the output format:** Clearly specify the output format you want the model to generate. For instance, you can ask the model to generate a JSON object, an HTML page, or an XML document. - -- **Define the structure and fields**: Outline the structure of the desired output and specify the required fields. This helps guide the model to generate responses that adhere to the desired structure. You can provide examples or templates to illustrate the expected format. - -- **Provide input context:** Include relevant information or data in the prompt that the model can utilize to generate the structured output. This context can assist the model in understanding the task or generating more accurate results. 
- -Here is an example demonstrating the use of structured data. - -``` -Help me generate a JSON object with keys `product` (name of product), `isPositive` (boolean), `summary` (one sentence summary of review) from the text enclosed in tag. - -Regrettably, the "XYZ ProTech 2000" product failed to meet even the most basic expectations. From its lackluster build quality and confusing user interface to its abysmal performance and disappointing customer support, this product left me deeply dissatisfied. If you're considering purchasing the "XYZ ProTech 2000," I strongly advise you to explore alternative options that offer superior quality and reliability. - -``` - -Output from the above prompt: - -```json -{ - "product": "XYZ ProTech 2000", - "isPositive": false, - "summary": "Failed to meet expectations due to lackluster build quality, confusing user interface, abysmal performance, and disappointing customer support." -} -``` - - - - diff --git a/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/style-information.md b/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/style-information.md deleted file mode 100644 index 6d909ae67..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/style-information.md +++ /dev/null @@ -1,12 +0,0 @@ -# Style Information - -By providing explicit instructions regarding the desired tone, you can influence the language model's writing style and ensure it aligns with your specific requirements. - -Clearly communicate the desired tone, style, or mood in the prompt. Whether it's formal, casual, humorous, professional, or any other specific style, mentioning it explicitly helps guide the model's writing. Also, consider incorporating keywords or phrases that reflect the desired style. For example, if you want a formal tone, include phrases like "in a professional manner" or "using formal language." This provides additional context to the model regarding the tone you expect. 
- -### Example Prompt - -``` -Write a formal email to decline a job offer. -``` -In this prompt example, the instruction explicitly states the desired tone as "formal." The model understands that the response should reflect a professional and formal writing style appropriate for declining a job offer. diff --git a/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/use-delimiters.md b/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/use-delimiters.md deleted file mode 100644 index 61e5cd9cc..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/use-delimiters.md +++ /dev/null @@ -1,27 +0,0 @@ -# Use Delimiters - -When crafting prompts for language models, it's crucial to ensure clear separation between the actual data and the instructions or context provided to the model. This distinction is particularly important when using data-driven prompts, where we want the model to generate responses based on specific input information. - -One effective technique to achieve this separation is by using delimiters to mark the boundaries between the prompt and the data. Delimiters act as clear indicators for the model to understand where the data begins and ends, helping it to generate responses more accurately. - -Here's how you can use delimiters effectively: - -- **Choose appropriate delimiters:** Select delimiters that are unlikely to appear naturally in the data. Commonly used choices include special characters or token combinations that rarely occur in the given context. For instance, you can use triple curly braces (`{{{ }}}`) or a special token like `<|data|>` as delimiters. - -- **Position the delimiters correctly:** Place the delimiters at the beginning and end of the data section, while ensuring a clear separation from the prompt. The prompt portion should precede the delimiter, providing the necessary instructions or context for the model. 
- -- **Use consistent delimiters throughout:** Maintain consistency in using the chosen delimiters across all prompts. This ensures uniformity in the data format, making it easier for the model to understand and process the information consistently. - -## Examples - -``` -Summarize the text delimited by triple curly braces into a single sentence. - -{{{put_your_text_here}}} -``` - -``` -Translate the text delimited by triple quotes into Arabic. - -"""How are you?""" -``` diff --git a/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/workout-solution.md b/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/workout-solution.md deleted file mode 100644 index 05ff03e26..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompts/good-prompts/workout-solution.md +++ /dev/null @@ -1,3 +0,0 @@ -# Workout its Solution - -LLM Models try to jump to solutions as soon as possible. They are not interested in the process of solving a problem. Sometimes giving strict instructions help get better results. \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompts/index.md b/src/data/roadmaps/prompt-engineering/content/prompts/index.md deleted file mode 100644 index ae8ee5986..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompts/index.md +++ /dev/null @@ -1,8 +0,0 @@ -# Prompts - -At this point, you probably already know what the Prompts are and the importance of writing good prompts. This section covers the best practices for writing good prompts as well as covering some of the commonly used prompting techniques. 
- -Learn more from the following resources: - -- [@guides@Prompt Engineering Guide: The Ultimate Guide to Generative AI](https://learnprompting.org/docs/introduction) -- [@guides@Optimizing Prompts | Prompt Engineering Guide](https://www.promptingguide.ai/guides/optimizing-prompts) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompts/parts-of-a-prompt.md b/src/data/roadmaps/prompt-engineering/content/prompts/parts-of-a-prompt.md deleted file mode 100644 index b785c7399..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompts/parts-of-a-prompt.md +++ /dev/null @@ -1,24 +0,0 @@ -# Parts of a Prompt - -When constructing a prompt, it's essential to understand the different parts that contribute to its effectiveness. A well-crafted prompt typically consists of **context**, **instruction**, and **example**. Understanding these parts will allow you to engineer prompts that elicit better and more precise responses. - -- **Context:** The context sets the stage for the information that follows. This may include defining key terms, describing relevant situations, or simply providing background information. Context helps the AI to understand the general theme or subject matter being addressed in the prompt. - - *Example: In a writing request about composing an email, you may provide context by describing the purpose or background of the email, such as a follow-up after a meeting.* - -2. **Instruction:** The instruction is the core component of the prompt. This is where you explicitly state the task or question that the AI is expected to perform or answer. It's important to be clear and direct with your instructions, specifying any guidelines or criteria for the response. - - *Example: Using the email scenario, you could instruct the AI to "Write a follow-up email thanking the recipient for their time and summarizing the main discussion points of the meeting."* - -3. 
**Example:** In some cases, it's helpful to provide one or more examples to guide or clarify the desired output. Examples can serve as a model for the AI and give an idea of what a successful response should look like. This is especially useful when the task is complex or has specific formatting requirements. - - *Example: To further clarify the email-writing task, you might provide a brief example of the tone or structure you want, such as "Dear [Recipient], Thank you for taking the time to meet with me yesterday. We discussed [topic 1], [topic 2], and [topic 3]. I look forward to our future collaboration."* - -By considering these three parts of a prompt — context, instruction, and example — you can create effective and well-formed prompts that produce targeted and accurate responses from the AI. - -Learn more from the following resources: - -- [@guides@Understanding Prompt Structure: Key Elements of a Prompt](https://learnprompting.org/docs/basics/formalizing) -- [@guides@Elements of a Prompt | Prompt Engineering Guide](https://www.promptingguide.ai/introduction/elements) -- [@video@Building with Anthropic Claude: Prompt Workshop ](https://youtu.be/hkhDdcM5V94?si=mW-GLSvi7TMg33wx) -- [@guides@General Tips for Designing Prompts | Prompt Engineering Guide](https://www.promptingguide.ai/introduction/tips) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/chain-of-thought.md b/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/chain-of-thought.md deleted file mode 100644 index c62df7970..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/chain-of-thought.md +++ /dev/null @@ -1,30 +0,0 @@ -# Chain of Thought - -In the world of prompt engineering, the **Chain of Thought** technique is an essential tool aimed at generating thoughtful and meaningful responses. 
By engaging the model in a step-by-step thinking process, this technique encourages the exploration of concepts, ideas, or problem-solving strategies in a sequential manner. - -## How Does it Work? - -This method involves breaking down a complex topic or issue into smaller, manageable segments that stimulate the logical progress of thought in the model, leading to a coherent and well-structured response. It's comparable to leading the model on a cognitive journey where ideas and concepts are connected in a logical and meaningful order. - -## Example - -To illustrate the application of the Chain of Thought technique, let's say we want the model to analyze the advantages and disadvantages of working from home. - -Instead of asking a broad question like: - -> "What are the advantages and disadvantages of working from home?" - -We can approach the topic through a series of connected prompts: - -- "List three reasons why people might prefer working from home." -- "For each reason you mentioned, explain the benefits and positive effects on the individual and/or the organization." -- "Now, consider the challenges of working from home. Identify three potential disadvantages or negative effects." -- "For each of these challenges, discuss how individuals and organizations can mitigate or address them." - -By employing the Chain of Thought technique, we have directed the model to provide a thorough and systematic analysis of the subject in question, ultimately resulting in a more meaningful and accurate response. 
- -Learn more from the following resources: - -- [@article@Chain-of-Thought Prompting | Prompt Engineering Guide](https://www.promptingguide.ai/techniques/cot) -- [@article@Chain-of-Thought Prompting](https://learnprompting.org/docs/intermediate/chain_of_thought) -- [@guides@Introduction to Thought Generation Prompting Techniques for LLMs](https://learnprompting.org/docs/advanced/thought_generation/introduction) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/combining-techniques.md b/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/combining-techniques.md deleted file mode 100644 index 4b318fdc3..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/combining-techniques.md +++ /dev/null @@ -1,11 +0,0 @@ -# Combining Techniques - -All the techniques we've covered so far are useful on their own, but they're even more powerful when combined. For example, you can combine "Role Prompting" and any other prompting technique e.g. Chain of Thought, Dual Prompt, etc. to get more specific responses. 
- -Learn more from the following resources: - -- [@article@Combining Prompting Techniques: Enhance AI Outputs](https://learnprompting.org/docs/basics/combining_techniques) -- [@course@Combining prompting techniques - Hyperskill](https://hyperskill.org/learn/step/47989) -- [@guides@The Ultimate Guide to LLM Prompting, Fine-tuning, and Data Management ](https://medium.com/@subhraj07/the-ultimate-guide-to-llm-prompting-fine-tuning-and-data-management-933bbd2d05f4) -- [@video@4 Methods of Prompt Engineering - IBM](https://www.youtube.com/watch?v=1c9iyoVIwDs) - diff --git a/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/dual-prompt.md b/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/dual-prompt.md deleted file mode 100644 index 1b3c33dc2..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/dual-prompt.md +++ /dev/null @@ -1,34 +0,0 @@ -# Dual Prompt Approach - -**Dual Prompt** is a technique that combines two or more prompts to generate more specific and meaningful responses. This approach can be used to elicit more detailed information or to narrow down the focus of a response. - -For example, let's say you are writing a guide about SEO. You could ask AI to write it for you with the following prompt: - -``` -Write me a guide about SEO. -``` - -However, this prompt may result in a generic guide without giving you what you may need. - -By adopting dual prompt, you will receive a more specific response that is tailored to your needs. For the above example, we could write our prompt in two prompts as follows: - -``` -I am writing a guide about SEO. Give me 10 key topics that I should cover in this guide. -``` - -Now you can give it a second prompt: - -``` -Write me a detailed guide about each of the points you gave above. -``` - -Or you could also combine these prompts into a single prompt as follows: - -``` -I am writing a guide about SEO. 
Take the 10 key topics about SEO and write a detailed introduction to each. -``` - -Learn more from the following resources: - -- [@article@Generated Knowledge in Prompts: Boosting AI Accuracy and Reliability](https://learnprompting.org/docs/intermediate/generated_knowledge#dual-prompt-approach) -- [@article@Recitation-augmented and Dual Prompting](https://aimlapi.com/academy-articles/recitation-augmented-and-dual-prompting) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/few-shot-prompting.md b/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/few-shot-prompting.md deleted file mode 100644 index 8fbdcb0d7..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/few-shot-prompting.md +++ /dev/null @@ -1,38 +0,0 @@ -# Few Shot Prompting - -Few-shot prompting is a technique in which a machine learning model is primed with a small number of examples (or "shots") that demonstrate the desired behavior, output, or task, before being presented with a new, related input. This approach allows the model to build an understanding of what is expected of it, even with limited context. It is particularly valuable for fine-tuning and generalizing large pre-trained models such as OpenAI's GPT-3. - -## Key Principles - -When using few-shot prompting, consider the following: - -- **Number of examples**: A few-shot setting typically involves 2-10 examples (but can vary), depending on the clarity and complexity of the task. -- **Context and relevancy**: The examples should be relevant to the desired task and provide an adequate basis for shaping the model's output. -- **Balance**: Strive for a balance between too few examples (under-specification) and too many examples (repetition and over-specification). - -## Examples & Tips - -Consider the following example for a sentiment-analysis task using few-shot prompting. 
You provide some labeled input/output pairs to the model, which helps it understand your expectations: - -``` -The movie was fantastic! - Positive -I didn't enjoy the food at all. - Negative -Amazing vacation, I had a great time! - Positive -She looks upset and angry. - Negative -``` - -After providing these examples, introduce the query you want the model to analyze: - -``` -The book was hard to put down. - {sentiment_label} -``` - -This prompt structure assists the model in grasping the sentiment analysis task and increases the likelihood of getting the correct output (i.e., "Positive"). - -Remember to experiment with the number of examples and their content to find the optimal balance for your specific task. Additionally, you can use inline instructions to guide the model further, such as asking it to classify the sentiment of a given sentence. - -Learn more from the following resources: - -- [@article@Few-Shot Prompting | Prompt Engineering Guide](https://www.promptingguide.ai/techniques/fewshot) -- [@article@Shot-Based Prompting: Zero-Shot, One-Shot, and Few-Shot Prompting](https://learnprompting.org/docs/basics/few_shot) -- [@guides@Introduction to Advanced Few-Shot Prompting Techniques](https://learnprompting.org/docs/advanced/few_shot/introduction) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/index.md b/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/index.md deleted file mode 100644 index 1d9a7ee29..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/index.md +++ /dev/null @@ -1 +0,0 @@ -# Prompting techniques \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/least-to-most.md b/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/least-to-most.md deleted file mode 100644 index f1551acbe..000000000 --- 
a/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/least-to-most.md +++ /dev/null @@ -1,96 +0,0 @@ -# Least to Most Prompting - -Least to Most prompting takes Chain of Thought (CoT) prompting a step further by first breaking a problem into sub problems then solving each one. It is a technique inspired by real-world educational strategies for children. - -As in CoT prompting, the problem to be solved is decomposed in a set of subproblems that build upon each other. In a second step, these subproblems are solved one by one. Contrary to chain of thought, the solution of previous subproblems is fed into the prompt trying to solve the next problem. - -## Example - -Let's ask a slightly complicated customer service question: - -``` -CUSTOMER INQUIRY: -I just bought a T-shirt from your Arnold collection on March 1st. -I saw that it was on discount, so bought a shirt that was originall $30, and got 40% off. -I saw that you have a new discount for shirts at 50%. -I'm wondering if I can return the shirt and have enough store credit to buy two of your shirts? - -INSTRUCTIONS: -You are a customer service agent tasked with kindly responding to customer inquiries. -Returns are allowed within 30 days. -Today's date is March 29th. -There is currently a 50% discount on all shirts. Shirt prices range from $18-$100 at your store. -Do not make up any information about discount policies. -``` - -Output from the model would be: - -``` -Thank you for your inquiry. Unfortunately, since you purchased the shirt on March 1st, it is outside of our 30-day return policy. -However, we do have a 50% discount on all shirts currently, so you could purchase two shirts with the same amount of store credit. -Prices for our shirts range from $18-$100. Please let us know if you have any other questions. 
-``` - -That failed (we are within the return time), so lets try to break it down into subproblems: - -``` -CUSTOMER INQUIRY: -I just bought a T-shirt from your Arnold collection on March 1st. -I saw that it was on discount, so bought a shirt that was originall $30, and got 40% off. -I saw that you have a new discount for shirts at 50%. I'm wondering if -I can return the shirt and have enough store credit to buy two of your shirts? - -INSTRUCTIONS: -You are a customer service agent tasked with kindly responding to customer inquiries. -Returns are allowed within 30 days. -Today's date is March 29th. -There is currently a 50% discount on all shirts. -Shirt prices range from $18-$100 at your store. -Do not make up any information about discount policies. -What subproblems must be solved before answering the inquiry? -``` - -Output from the model would be: - -``` -1. Determine if the customer is within the 30-day return window. -2. Calculate the amount of store credit the customer would receive if they returned the shirt. -3. Calculate the cost of two shirts at the current 50% discount. -4. Determine if the customer has enough store credit to purchase two shirts. -``` - -Let's try to solve the first subproblem: - -``` -CUSTOMER INQUIRY: -I just bought a T-shirt from your Arnold collection on March 1st. -I saw that it was on discount, so bought a shirt that was originally $30, and got 40% off. -I saw that you have a new discount for shirts at 50%. -I'm wondering if I can return the shirt and have enough store credit to buy two of your shirts? - -INSTRUCTIONS: -You are a customer service agent tasked with kindly responding to customer inquiries. -Returns are allowed within 30 days. -Today's date is March 29th. -There is currently a 50% discount on all shirts. -Shirt prices range from $18-$100 at your store. -Do not make up any information about discount policies. -Determine if the customer is within the 30-day return window. -Let's go step by step. 
-``` - -Output from the model would be: - -``` -Yes, the customer is within the 30-day return window. -The customer purchased the shirt on March 1st, and today's date is March 29th, which is within the 30-day return window. - -Therefore, the customer is eligible to return the shirt and receive store credit for the purchase. -The customer can use the store credit to purchase two shirts at the current 50% discount. -``` - -By just solving the first subproblem, we were able to solve the entire problem. If GPT-3 did not return an answer immediately, we could have solved the next subproblem and so on until it did return an answer. Note that we use Let's go step by step.. The addition of this phrase is not always necessary, but it helps for this example. - -Learn more from the following resources: - -- [@article@Least-to-Most Prompting](https://learnprompting.org/docs/intermediate/least_to_most) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/role-prompting.md b/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/role-prompting.md deleted file mode 100644 index 27d1e8200..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/role-prompting.md +++ /dev/null @@ -1,22 +0,0 @@ -# Role Prompting - -Role prompting is a technique used in prompt engineering to encourage the AI to approach a question or problem by assuming a specific role, character, or viewpoint. This strategy can lead to a more focused, creative, or empathetic response depending on the given role. - -## How to use Role Prompting - -1. **Identify a role or character:** Determine a character or role that will be compelling and relevant to the problem or question you're posing. This could be a real or fictional character or a general professional role. - -2. **Provide context:** Set a scene or introduce the role so the AI knows the context in which it should respond. 
This can help to encourage responses that align closely with the character's attributes or profession. - -3. **Pose the question or task:** Now, with the context and role present, ask the question or set the task you want the AI or user to respond to. Make sure it's explicitly related to the chosen role. - -## Example of Role Prompting - -Imagine you want to explore solutions to an environmental problem. You can use role prompting to elicit diverse perspectives and insights. Here's an example prompt with role prompting: - -_As a climate scientist, how would you approach the problem of deforestation to minimize its impact on the environment?_ - -Learn more from the following resources: - -- [@article@Assigning Roles to Chatbots](https://learnprompting.org/docs/basics/roles) -- [@article@Revisiting Roles in Prompting](https://learnprompting.org/docs/intermediate/revisiting_roles) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/zeroshot-chain-of-thought.md b/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/zeroshot-chain-of-thought.md deleted file mode 100644 index 5e5afad68..000000000 --- a/src/data/roadmaps/prompt-engineering/content/prompts/prompting-techniques/zeroshot-chain-of-thought.md +++ /dev/null @@ -1,27 +0,0 @@ -# Zero Shot Chain of Thought - -Zeroshot chain of thought is a prompting technique that encourages models to provide multi-step reasoning or follow a series of interconnected thoughts in order to tackle a given problem. This technique is particularly effective in tasks where the answer requires a reasoning process or depends on chaining several intermediate ideas together. - - -How to implement a zeroshot chain of thought prompt: - -- Start by defining a clear initial question or problem that will serve as the starting point for the chain. 
-- Craft a prompt that not only asks the model to provide an answer to the initial question, but also requests that the model explain its reasoning step by step. -- Encourage the model to consider intermediate steps, possible alternatives, or connections between ideas explicitly in its response. - -## Example - -Suppose you want the model to explain how a solar panel works. A zeroshot chain of thought prompt could look like this: - -``` -Please explain the process of how a solar panel works, starting with sunlight hitting the panel's surface and ending with electricity being produced. Structure your response as a step-by-step chain of thought, taking care to clarify how each step leads to the next. -``` - -By designing prompts that explicitly request step-by-step reasoning, the zeroshot chain of thought technique can lead to more comprehensive and insightful answers that go beyond simple factual statements. - -Learn more from the following resources: - -- [@article@Zero-Shot Prompting | Prompt Engineering Guide](https://www.promptingguide.ai/techniques/zeroshot) -- [@article@Zero-Shot CoT Prompting: Improving AI with Step-by-Step Reasoning](https://learnprompting.org/docs/intermediate/zero_shot_cot) -- [@article@Shot-Based Prompting: Zero-Shot, One-Shot, and Few-Shot Prompting](https://learnprompting.org/docs/basics/few_shot) -- [@guides@Introduction to Advanced Zero-Shot Prompting Techniques](https://learnprompting.org/docs/advanced/zero_shot/introduction) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/rag@gxydtFKmnXNY9I5kpTwjP.md b/src/data/roadmaps/prompt-engineering/content/rag@gxydtFKmnXNY9I5kpTwjP.md new file mode 100644 index 000000000..ee0679087 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/rag@gxydtFKmnXNY9I5kpTwjP.md @@ -0,0 +1 @@ +# RAG \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/react-prompting@8Ks6txRSUfMK7VotSQ4sC.md 
b/src/data/roadmaps/prompt-engineering/content/react-prompting@8Ks6txRSUfMK7VotSQ4sC.md new file mode 100644 index 000000000..12743c9f0 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/react-prompting@8Ks6txRSUfMK7VotSQ4sC.md @@ -0,0 +1 @@ +# ReAct Prompting \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/real-world/coding-assistance.md b/src/data/roadmaps/prompt-engineering/content/real-world/coding-assistance.md deleted file mode 100644 index d9f789930..000000000 --- a/src/data/roadmaps/prompt-engineering/content/real-world/coding-assistance.md +++ /dev/null @@ -1,159 +0,0 @@ -# Coding Assistance - -Large Language Models (LLMs) tailored for coding have become invaluable tools for developers, offering a wide range of functionalities to assist in coding tasks. These include debugging, code generation, reformatting and reviewing, translating between programming languages, refactoring code, program simulations, and generating unit tests. Recent advancements have expanded their capabilities to include code optimization, documentation generation, bug fixing, project management assistance, security analysis, and version control support. New features also allow users to interact with outputs directly, such as editing or refining code suggestions with or without additional prompts, like OpenAI's Canvas or Anthropic's Artefacts. These advancements make LLMs' capabilities available across the software development process, enhancing productivity and streamlining workflows across various stages of coding and project management. - -## Code Generation - -Instructing ChatGPT to generate code is as simple as this: - -``` -Act as a junior python programmer. Please write the code to generate madlibs from 3 arrays of 6 items each: prepositions, verbs and nouns, with the nouns also being used as direct objects. After writing it, execute the code. 
-``` - -The output will be something like: - -```python -import random - -prepositions = ["at", "in", "on", "with", "under", "over"] -verbs = ["run", "jump", "dance", "sing", "laugh", "cry"] -nouns = ["cat", "dog", "tree", "flower", "book", "computer"] - -preposition = random.choice(prepositions) -verb = random.choice(verbs) -noun = random.choice(nouns) - -madlib = f"I {verb} {preposition} the {noun}." - -print(madlib) -``` - -## Commenting and Reformatting Code - -You can ask ChatGPT to add comments to your code, and reformat it for legibility. After your prompt instruction, add three hashmarks and then paste in the code you'd like it to clean up: - -``` -Please add line comments to this Python code and reformat it for legibility: - -import math -amplitude = 30 -frequency = 2 -num_points = 50 -for i in range(num_points): - y = int(round(amplitude * math.sin(2 * math.pi * frequency * i / num_points))) - print(' ' * (amplitude + y) + '*') -``` - -The output will be something like: - -```python -# Import the math module -import math - -# Set the amplitude, frequency, and number of points in the wave -amplitude = 30 -frequency = 2 -num_points = 50 - -# Loop over each point in the wave -for i in range(num_points): - # Calculate the y value of the wave using the sine function from the math module - y = int(round(amplitude * math.sin(2 * math.pi * frequency * i / num_points))) - - # Print a space for padding, followed by an asterisk at the appropriate position on the wave - print(" " * (amplitude + y) + "*") -``` - -## Debugging - -Not only can ChatGPT detect syntax errors in code, but it can also find logical errors that would crop up when code is executed. Below is an example of a Python script that eventually causes a division by zero error on line 4 due to a logical error on line 3. Try this simple prompt to find and fix the error: - -``` -Please debug this Python code: - -1. pizza_cost = 100 -2. partygoers = 10 -3. while partygoers = 0: -4. 
cost_per_person = pizza_cost / partygoers
-5. print("the cost per person is $", "{:.2f}".format(cost_per_person))
-6. partygoers -= 2
-```
-
-The output will include the corrected while loop, which is changed to ensure the denominator variable stays positive:
-
-```
-3. while partygoers > 0:
-```
-
-For more nuanced errors, you may need to add more specifics: the error message, environment variables, included libraries or variations in inputs.
-
-## Optimizing Code
-
-If your script is taking too long to execute, ChatGPT can optimize it. Here's an example of optimizing an inefficient script that finds prime numbers:
-
-```
-Act like a very senior python developer. Please optimize this script:
-
-for num in range(1, 101):
-    if num > 1:
-        for i in range(2, num):
-            if (num % i) == 0:
-                break
-        else:
-            print(num)
-```
-The output might be a script that uses the Sieve of Eratosthenes algorithm for finding prime numbers:
-
-```python
-numbers = list(range(2, 101))
-for i in numbers:
-    for j in range(i * 2, 101, i):
-        if j in numbers:
-            numbers.remove(j)
-print(numbers)
-```
-
-Note that we asked the bot to "act like a senior developer" to optimize the script. You can also dictate that it have a certain area of expertise (e.g., sorting algorithms) or number of years of experience. Alternatively, if you have a script that seems overly complicated, you can ask ChatGPT to write that script "as a very junior developer."
-
-## Translating Between Programming Languages
-
-A very useful feature ChatGPT offers is the ability to port programs from one language to another. A typical use for this might be upgrading code in an older language to a more modern one:
-
-```
-Act like an experienced developer with knowledge of both COBOL and Python. Please convert this COBOL program to Python:
-
-
-IDENTIFICATION DIVISION.
-PROGRAM-ID. CALCULATE-SS-TAX.
-
-DATA DIVISION.
-WORKING-STORAGE SECTION.
-01 SALARY PIC 9(7)V99.
-01 SS-TAX PIC 9(7)V99.
-
-PROCEDURE DIVISION.
-DISPLAY "Enter employee's salary: ". -ACCEPT SALARY. - -COMPUTE SS-TAX = SALARY * 0.062. - -DISPLAY "Social Security tax is: $", SS-TAX. -STOP RUN. -``` - -The output might be something like this: - -```python -salary = float(input("Enter employee's salary: ")) -ss_tax = salary * 0.062 - -print("Social Security tax is: $", round(ss_tax, 2)) -``` - -Learn more from the following resources: - -- [@article@LLM Coding Assistance](https://learnprompting.org/docs/basic_applications/coding_assistance) -- [@guides@Generating Code | Prompt Engineering Guide](https://www.promptingguide.ai/applications/coding) -- [@article@Prompt engineering 101 for developers](https://www.pluralsight.com/resources/blog/software-development/prompt-engineering-for-developers) -- [@guide@Prompt Engineering for Coding Tasks ](https://towardsdatascience.com/prompt-engineering-llms-coding-chatgpt-artificial-intelligence-c16620503e4e) -- [@article@How GenAI Transforms Software Development: 11 Use Cases](https://www.index.dev/blog/11-generative-ai-use-cases-software-development) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/real-world/designing-chatbots.md b/src/data/roadmaps/prompt-engineering/content/real-world/designing-chatbots.md deleted file mode 100644 index 0e747c265..000000000 --- a/src/data/roadmaps/prompt-engineering/content/real-world/designing-chatbots.md +++ /dev/null @@ -1,9 +0,0 @@ -# Designing Chatbots - -Designing chatbots and other interfaces for real-world usage cases remains a key focus in the tech industry. Large language models (LLMs) continue to enhance chatbot capabilities, allowing them to respond effectively to diverse user inputs with minimal effort and training. 
The use cases for new user interfaces is expanding beyond traditional chatbots like: voice interfaces, gesture-based interfaces, multimodal AI systems using various input types, AI agents performing complex tasks autonomously, extended reality (XR) combining AI with AR/VR for immersive experiences. These advancements highlight the evolving nature of user interfaces, driven by technological innovations that promise more engaging and efficient interactions. - -Learn more from the following resources: - -- [@video@What is a Chatbot?](https://www.youtube.com/watch?v=o9-ObGgfpEk) -- [@video@Do Chatbots Need AI?](https://www.youtube.com/watch?v=93l-2Usc08U) -- [@video@Generative vs Rules-Based Chatbots](https://www.youtube.com/watch?v=lZjUS_8btEo) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/real-world/index.md b/src/data/roadmaps/prompt-engineering/content/real-world/index.md deleted file mode 100644 index 248c0addd..000000000 --- a/src/data/roadmaps/prompt-engineering/content/real-world/index.md +++ /dev/null @@ -1,24 +0,0 @@ -# Real World Usage Examples - -LLMs are being used in an increasing number and variety of usages, applications and embedded approaches. These use cases are dynamic and ever evolving. 
- -### Examples - -Here are some examples of how LLMs are used in the real world, though not exhaustive: - -- Natural Language Processing -- Information Retrieval -- Internet Search & Citations -- Question Answering -- Text Generation -- Text Summarization -- Learning Tools -- Chatbots and Assistants -- Agents & Automation -- Multimedia Content - -Follow the resources and latest updates to learn more: - -- [@article@Solving Everyday Tasks with GenAI: Emails, Contracts, and More](https://learnprompting.org/docs/basic_applications/introduction) -- [@article@LLM Applications & Guides | Prompt Engineering Guide](https://www.promptingguide.ai/applications) -- [@article@7 Large Language Model (LLM) Use Cases and Applications: How AI is Reshaping Industries ](https://collabnix.com/7-top-large-language-model-llm-use-cases-and-applications-how-ai-is-reshaping-industries/) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/real-world/inferring.md b/src/data/roadmaps/prompt-engineering/content/real-world/inferring.md deleted file mode 100644 index ea3b349e1..000000000 --- a/src/data/roadmaps/prompt-engineering/content/real-world/inferring.md +++ /dev/null @@ -1,33 +0,0 @@ -# Inferring - -Making use of LLMs to infer information from text, i.e. text analysis, is another common use case. A common use case is sentiment analysis, such as grouping e-commerce reviews by sentiment. Beyond this, LLMs have diverse real-world applications, including customer service automation, market research, fraud detection, compliance monitoring, speech-to-text transcription, and anomaly detection. These applications demonstrate the versatility of LLMs in extracting valuable insights from text data, automating processes, and enhancing decision-making across different sectors. 
By leveraging LLMs, businesses can improve operational efficiency, gain deeper customer insights, and tackle complex challenges in areas such as cybersecurity and regulatory compliance.
-
-### Example
-
-Imagine you are running a famous e-commerce store with thousands of reviews. You want to group the reviews by the sentiment of the review. You can use LLMs to infer the sentiment of the review.
-
-Here are some prompts showing the use of inference:
-
-```
-What is the sentiment of this text which is delimited with triple quotes. Your output must be a single word.
-
-"""Horrible product. They charged me twice the price and it broke after a week. I will never buy from them again."""
-```
-
-The output from this prompt is `negative`.
-
-Another example:
-
-```
-Identify a list of emotions that the writer of the following review is expressing. Include no more than 5 items in the list. Format your answers as a list of lower-case words separated by commas.
-
-"""I am so happy with this product. It is the best thing I have ever bought.
I will definitely buy from them again.""" -``` - -Output: `happy, satisfied, pleased, content, enthusiastic` - -Learn more from the following resources: - -- [@article@Generating Data | Prompt Engineering Guide](https://www.promptingguide.ai/applications/generating) -- [@article@Every Day Prompt Engineering - Text Analysis](https://www.ikangai.com/every-day-prompt-engineering-part-5-text-analysis/) -- [@course@Prompt Engineering Course — Inferring, Transforming, and Expanding with ChatGPT ](https://medium.com/geekculture/prompt-engineering-course-openai-inferring-transforming-expanding-chatgpt-chatgpt4-e5f63132f422) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/real-world/structured-data.md b/src/data/roadmaps/prompt-engineering/content/real-world/structured-data.md deleted file mode 100644 index d893a6d8b..000000000 --- a/src/data/roadmaps/prompt-engineering/content/real-world/structured-data.md +++ /dev/null @@ -1,66 +0,0 @@ -# Structured Data - -Structured data is a powerful tool for organizing information effectively, particularly in real-world applications like e-commerce, mobile, and search. It helps convert user inputs into structured formats such as JSON, Markdown tables, HTML, and YAML, enhancing data management and retrieval. Recent improvements in this field include Google's expanded product information types, the increasing use of AI to automate and enhance structured data processes, the continued preference for JSON-LD as a structured data format, and enhanced mobile experiences through rich media and detailed information in search results. - -## Examples - -### Example: JSON Object - -Consider a user searching for a "Birthday gift for my 18 months old daughter." Using prompts, you can generate structured data in various formats. 
- -JSON Example: - -```json -Print a JSON object containing: -- `gender` ("male", "female"), -- `occasion` (one of "party", "birthday", "anniversary"), -- `age_years` (numeric value) -from the text delimited by triple quotes: - """Birthday gift for my 18 months old daughter""" - -``` - -**Output** -The model would generate the following JSON object: - -```json -{ - "gender": "female", - "occasion": "birthday", - "age_years": 1.5 -} - -``` - -The following examples extend the original Print JSON prompt to convert one structured data into other formats, demonstrating another capability with structured data, data format interchange from a single source prompt. - -### Example: Markdown Table - -From JSON to Markdown table - -```markdown -| Gender | Occasion | Age (years) | -|--------|----------|-------------| -| Female | Birthday | 1.5 | -``` - -### Example: HTML Table - -From JSON to HTML table - - - - - - - - - - - - -
GenderOccasionAge (years)
FemaleBirthday1.5
- -Learn more from the following resources: - -- [@article@How to Use AI to Organize Data into Tables for Reports and Articles](https://learnprompting.org/docs/basic_applications/table_generation) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/real-world/study-buddy.md b/src/data/roadmaps/prompt-engineering/content/real-world/study-buddy.md deleted file mode 100644 index 6e4db8003..000000000 --- a/src/data/roadmaps/prompt-engineering/content/real-world/study-buddy.md +++ /dev/null @@ -1,52 +0,0 @@ -# Study Buddy - -LLMs have become powerful study & tutoring tools, offering various applications in education. They can explain complex terms, generate quizzes, and provide personalized learning experiences adapted to individual needs, including neurodivergent learning styles. Recent developments have expanded their capabilities to include multilingual support, automated grading and feedback, content creation for diverse educational materials, and improved accessibility for all students. These advancements enable LLMs to enhance comprehension, retention, and overall educational outcomes while allowing educators to focus on more complex tasks. - -## Explaining Terms - -I was recently reading a textbook about Cacti1, and saw this passage: - -``` -Because of adaptations to xeric conditions and the presence of **less derived woods** in Pereskioideae, the Cactaceae is one of the most interesting families both anatomically and morphologically. -``` - -I had no idea what less derived woods meant, and Google only showed research articles on it. I decided to use ChatGPT to find out what it meant. I gave it the following prompt, which is simple the passage from the book and my question. - -``` -Because of adaptations to xeric conditions and the presence of less derived woods in Pereskioideae, the Cactaceae is one of the most interesting families both anatomically and morphologically. - -What do less derived woods mean? 
-``` - -The model responded with: - -``` -In this context, "less derived woods" refers to the ancestral or primitive form of wood found in the early evolution of plants. These woods lack specialized structures and features that evolved later in more derived or advanced plants. In the Pereskioideae subfamily of the Cactaceae family, the wood is less derived compared to other subfamilies and is more similar to the wood found in non-cactus plants. The less derived wood in Pereskioideae is thought to be an adaptation to the more humid and less xeric environments in which these plants grow compared to other cacti. -``` - -The model was able to answer my question quickly and I was able to continue with my reading. - -## Quizzing Yourself - -Another way to use LLMs as a study tool is to quiz yourself. If you have an upcoming calculus test, you could simply ask: - -``` -Generate 5 calculus I quiz questions for me: -``` - -And the model will generate 5 questions for you to answer. - -``` -1. What is the derivative of y = x3? -2. Evaluate the indefinite integral of x3 + 5x2 + 7x + 4. -3. What is the product rule for derivatives? -4. Find the equation of the line tangent to y = x2 at the point (4, 16). -5. What is the second derivative of y = x4? -``` - -There are probably other ways to use LLMs to study. Many learning companies like Duolingo and Quizlet are implementing LLMs into their products. 
- -Learn more from the following resources: - -- [@article@LLMs as a Study Buddy](https://learnprompting.org/docs/basic_applications/study_tool) -- \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/real-world/writing-emails.md b/src/data/roadmaps/prompt-engineering/content/real-world/writing-emails.md deleted file mode 100644 index e87920f97..000000000 --- a/src/data/roadmaps/prompt-engineering/content/real-world/writing-emails.md +++ /dev/null @@ -1,34 +0,0 @@ -# Writing Emails - -Language Models, like ChatGPT, can be incredibly beneficial for writing emails due to their ability to generate coherent and contextually appropriate text. When composing an email, an LLM can provide assistance with various aspects, such as generating engaging introductions or addressing specific questions or concerns. For instance, if you're writing a sales email, you can prompt the LLM with the request to generate a compelling opening paragraph that captures the recipient's attention. Similarly, when responding to customer inquiries, you can ask the LLM to help craft clear and informative responses that address the query with precision. - -For example, here is a prompt to generate an automated email response to a customer inquiry: - -``` -Write an email informing a customer that their inquiry has been received and will be addressed shortly. Include the order number and the customer's name in the email. -``` - -Here is the response generated by the LLM: - -``` -Subject: Confirmation - Inquiry Received - Order #123456 - -Dear [Customer's Name], - -I'm writing to confirm that we have received your inquiry regarding your recent order. We appreciate you taking the time to reach out to us. - -Your order number is #123456, and we understand the importance of your inquiry. Rest assured, our team is already reviewing your case and will address it promptly. We strive to provide a timely response and a satisfactory resolution to all customer inquiries. 
- -If we require any additional information or clarification, we will reach out to you directly. In the meantime, please feel free to contact us if you have any further questions or concerns about your order. - -We value your business and are committed to ensuring your complete satisfaction. Thank you for choosing our company, and we appreciate your patience as we work on addressing your inquiry. - -Best regards, -AI Customer Service -``` - -Learn more from the following resources: - -- [@article@Write Emails Faster Using AI: Sick Days, Cold Outreach, and More](https://learnprompting.org/docs/basic_applications/writing_emails) -- [@guides@AI Tools for Business Writing](https://communicationprogram.wharton.upenn.edu/library/ai-tools/) -- [@article@Everyday Prompt Engineering -Emails](https://www.ikangai.com/everyday-prompt-engineering-part-one-emails/) \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/reliability/calibrating-llms.md b/src/data/roadmaps/prompt-engineering/content/reliability/calibrating-llms.md deleted file mode 100644 index effd01354..000000000 --- a/src/data/roadmaps/prompt-engineering/content/reliability/calibrating-llms.md +++ /dev/null @@ -1,28 +0,0 @@ -# Calibrating LLMs - -Calibration refers to the process of adjusting the model to produce responses that are consistent with human-defined ratings, rankings, or scores. - -## Importance of Calibration - -Calibrating the LLMs helps to: - -1. Minimize system biases and improve response quality. -2. Increase the alignment between user expectations and the model's output. -3. Improve the interpretability of the model's behavior. - -## Calibration Techniques - -There are various techniques to calibrate LLMs that you can explore, including: - -1. **Prompt Conditioning**: Modifying the prompt itself to encourage desired behavior. This involves using explicit instructions or specifying the format of the desired response. -2. 
**Response Rankings**: Presenting the model with multiple potential responses and asking it to rank them by quality or relevance. This technique encourages the model to eliminate inappropriate or low-quality responses by assessing them against other possible answers. -3. **Model Debiasing**: Applying debiasing techniques, such as counterfactual data augmentation or fine-tuning the model with diverse, bias-mitigating training data. -4. **Temperature Adjustment**: Dynamically controlling the randomness or 'temperature' parameter during the inference to balance creativity and coherence of the output. - -### Iterative Calibration - -Calibration should be an iterative process, where improvements are consistently monitored and further adjustments made based on the data collected from users. Continual learning from user interactions can help increase the model's overall reliability and maintain its performance over time. - -Remember, calibrating LLMs is an essential part of creating reliable, high-quality language models that effectively meet user needs and expectations. Through prompt conditioning, response ranking, model debiasing, temperature adjustment, and iterative improvements, you can successfully achieve well-calibrated and reliable LLMs. - -Learn more at [learnprompting.org](https://learnprompting.org/docs/reliability/intro) diff --git a/src/data/roadmaps/prompt-engineering/content/reliability/debiasing.md b/src/data/roadmaps/prompt-engineering/content/reliability/debiasing.md deleted file mode 100644 index 6677f88e2..000000000 --- a/src/data/roadmaps/prompt-engineering/content/reliability/debiasing.md +++ /dev/null @@ -1,25 +0,0 @@ -# Prompt Debiasing - -Debiasing is the process of reducing bias in the development and performance of AI language models, such as OpenAI’s GPT-3. When constructing prompts, it's important to address existing biases and assumptions that may be inadvertently incorporated into the model due to training data or other factors. 
By considering debiasing, we aim to promote fairness, neutrality, and inclusiveness in AI-generated responses. - -## Why is Debiasing Important? - -AI models can absorb various biases from their diverse training data, including but not limited to: - -- Gender bias -- Racial bias -- Ethnic bias -- Political bias - -These biases may result in unfair, offensive, or misleading outputs. As prompt engineers, our responsibility is to create prompts that minimize the unintentional effects of such biases in the responses generated by the model. - -## Key Strategies for Debiasing - -Here are a few strategies that can help you address biases in your prompts: - -1. **Objective Wording**: Use objective language and avoid making assumptions about race, gender, ethnicity, nationality, or any other potentially biased characteristics. -2. **Equitable Representation**: Ensure prompts represent diverse perspectives and experiences, so that the model learns to generate responses that are fair and unbiased. -3. **Counter-balancing**: If a bias is unavoidable due to the context or nature of the prompt, consider counter-balancing it by providing an alternative perspective or side to the argument. -4. **Testing and Iterating**: Continuously test and iterate on your prompts, seeking feedback from a diverse group of reviewers to identify and correct potential biases. - -Learn more at [learnprompting.org](https://learnprompting.org/docs/reliability/intro) diff --git a/src/data/roadmaps/prompt-engineering/content/reliability/ensembling.md b/src/data/roadmaps/prompt-engineering/content/reliability/ensembling.md deleted file mode 100644 index 16a361d1c..000000000 --- a/src/data/roadmaps/prompt-engineering/content/reliability/ensembling.md +++ /dev/null @@ -1,15 +0,0 @@ -# Prompt Ensembling - -Ensembling is a technique used to improve the reliability and accuracy of predictions by combining multiple different models, essentially leveraging the 'wisdom of the crowd'. 
The idea is that combining the outputs of several models can cancel out biases, reduce variance, and lead to a more accurate and robust prediction. - -There are several ensembling techniques that can be used, including: - -- **Majority voting**: Each model votes for a specific output, and the one with the most votes is the final prediction. -- **Weighted voting**: Similar to majority voting, but each model has a predefined weight based on its performance, accuracy, or other criteria. The final prediction is based on the weighted sum of all model predictions. -- **Bagging**: Each model is trained on a slightly different dataset, typically generated by sampling with replacement (bootstrap) from the original dataset. The predictions are then combined, usually through majority voting or averaging. -- **Boosting**: A sequential ensemble method where each new model aims to correct the mistakes made by the previous models. The final prediction is a weighted combination of the outputs from all models. -- **Stacking**: Multiple base models predict the output, and these predictions are used as inputs for a second-layer model, which provides the final prediction. - -Incorporating ensembling in your prompt engineering process can help produce more reliable results, but be mindful of factors such as increased computational complexity and potential overfitting. To achieve the best results, make sure to use diverse models in your ensemble and pay attention to tuning their parameters, balancing their weights, and selecting suitable ensembling techniques based on your specific problem and dataset. 
- -Learn more at [learnprompting.org](https://learnprompting.org/docs/reliability/intro) diff --git a/src/data/roadmaps/prompt-engineering/content/reliability/index.md b/src/data/roadmaps/prompt-engineering/content/reliability/index.md deleted file mode 100644 index 4c29b9e51..000000000 --- a/src/data/roadmaps/prompt-engineering/content/reliability/index.md +++ /dev/null @@ -1,9 +0,0 @@ -# Improving Reliability - -To a certain extent, most of the previous techniques covered have to do with improving completion accuracy, and thus reliability, in particular self-consistency. However, there are a number of other techniques that can be used to improve reliability, beyond basic prompting strategies. - -LLMs have been found to be more reliable than we might expect at interpreting what a prompt is trying to say when responding to misspelled, badly phrased, or even actively misleading prompts. Despite this ability, they still exhibit various problems including hallucinations, flawed explanations with CoT methods, and multiple biases including majority label bias, recency bias, and common token bias. Additionally, zero-shot CoT can be particularly biased when dealing with sensitive topics. - -Common solutions to some of these problems include calibrators to remove a priori biases, and verifiers to score completions, as well as promoting diversity in completions. 
- -Learn more at [learnprompting.org](https://learnprompting.org/docs/reliability/intro) diff --git a/src/data/roadmaps/prompt-engineering/content/reliability/math.md b/src/data/roadmaps/prompt-engineering/content/reliability/math.md deleted file mode 100644 index 8767c3450..000000000 --- a/src/data/roadmaps/prompt-engineering/content/reliability/math.md +++ /dev/null @@ -1,18 +0,0 @@ -# Math - -As a prompt engineer, you can take the following steps to improve the reliability of Language Models (LMs) for mathematical tasks: - -- Clear and specific prompts: Craft clear and specific prompts that provide the necessary context for the mathematical task. Specify the problem type, expected input format, and desired output format. Avoid ambiguous or vague instructions that can confuse the LM. -- Formatting cues: Include formatting cues in the prompts to guide the LM on how to interpret and generate mathematical expressions. For example, use LaTeX formatting or explicit notations for mathematical symbols, equations, or variables. -- Example-based prompts: Provide example-based prompts that demonstrate the desired input-output behavior. Show the model correct solutions for different problem types to help it understand the expected patterns and formats. -- Step-by-step instructions: Break down complex mathematical problems into step-by-step instructions. Provide explicit instructions on how the model should approach the problem, such as defining variables, applying specific rules or formulas, or following a particular sequence of operations. -- Error handling: Anticipate potential errors or misconceptions the LM might make, and explicitly instruct it on how to handle those cases. Provide guidance on common mistakes and offer corrective feedback to help the model learn from its errors. -- Feedback loop: Continuously evaluate the model's responses and iterate on the prompts based on user feedback. 
Identify areas where the LM is consistently making errors or struggling, and modify the prompts to address those specific challenges. -- Context injection: Inject additional context into the prompt to help the model better understand the problem. This can include relevant background information, specific problem constraints, or hints to guide the LM towards the correct solution. -- Progressive disclosure: Gradually reveal information or subtasks to the LM, rather than providing the entire problem at once. This can help the model focus on smaller subproblems and reduce the cognitive load, leading to more reliable outputs. -- Sanity checks: Include sanity checks in the prompt to verify the reasonableness of the model's output. For example, you can ask the model to show intermediate steps or validate the solution against known mathematical properties. -- Fine-tuning and experimentation: Fine-tune the LM on a dataset that specifically focuses on mathematical tasks. Experiment with different prompt engineering techniques and evaluate the impact on the model's reliability. Iterate on the fine-tuning process based on the results obtained. - -By applying these prompt engineering strategies, you can guide the LM towards more reliable and accurate responses for mathematical tasks, improving the overall usability and trustworthiness of the model. - -Learn more at [learnprompting.org](https://learnprompting.org/docs/reliability/intro) diff --git a/src/data/roadmaps/prompt-engineering/content/reliability/self-evaluation.md b/src/data/roadmaps/prompt-engineering/content/reliability/self-evaluation.md deleted file mode 100644 index 5bb407e1a..000000000 --- a/src/data/roadmaps/prompt-engineering/content/reliability/self-evaluation.md +++ /dev/null @@ -1,19 +0,0 @@ -# LLM Self Evaluation - -Self-evaluation is an essential aspect of the prompt engineering process. 
It involves the ability of an AI model to assess its own performance and determine the level of confidence it has in its responses. By properly incorporating self-evaluation, the AI can improve its reliability, as it will learn to identify its weaknesses and provide more accurate responses over time. - -## Implementing Self-Evaluation - -When incorporating self-evaluation into an AI model, you should consider the following elements: - -1. **Objective metrics**: Develop quantitative measures that determine the quality of a response. Examples include accuracy, precision, recall, and F1 scores. These metrics can be used as part of the AI model's assessment process, offering a consistent way to gauge its performance. - -2. **User feedback**: Collect user feedback on the AI model's responses, as users can provide valuable information about the quality and utility of the generated content. By allowing users to rate answers or report issues, the AI model can integrate this feedback into its self-evaluation process. - -3. **Confidence levels**: Implement a system that measures the AI model's confidence in its responses. A confidence score can help users understand the reliability of a response, and it can also help the AI model refine its behavior when it has uncertainty. Make sure the confidence score is calculated based on factors such as data quality, algorithm performance, and historical accuracy. - -4. **Error monitoring**: Establish a system that continuously monitors the AI model's performance by tracking errors, outliers, and other unexpected results. This monitoring process should inform the self-evaluation mechanism and help the AI model adapt over time. - -By incorporating self-evaluation into your AI model, you can create a more reliable system that users will trust and appreciate. This, in turn, will lead to a greater sense of confidence in the AI model and its potential to solve real-world problems. 
- -Learn more at [learnprompting.org](https://learnprompting.org/docs/reliability/intro) diff --git a/src/data/roadmaps/prompt-engineering/content/repetition-penalties@g8ylIg4Zh567u-E3yVVY4.md b/src/data/roadmaps/prompt-engineering/content/repetition-penalties@g8ylIg4Zh567u-E3yVVY4.md new file mode 100644 index 000000000..08df26272 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/repetition-penalties@g8ylIg4Zh567u-E3yVVY4.md @@ -0,0 +1 @@ +# Repetition Penalties \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/role-prompting@XHWKGaSRBYT4MsCHwV-iR.md b/src/data/roadmaps/prompt-engineering/content/role-prompting@XHWKGaSRBYT4MsCHwV-iR.md new file mode 100644 index 000000000..c833363f9 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/role-prompting@XHWKGaSRBYT4MsCHwV-iR.md @@ -0,0 +1 @@ +# Role Prompting \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/sampling-parameters@JgigM7HvmNOuKnp60v1Ce.md b/src/data/roadmaps/prompt-engineering/content/sampling-parameters@JgigM7HvmNOuKnp60v1Ce.md new file mode 100644 index 000000000..aa4801b74 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/sampling-parameters@JgigM7HvmNOuKnp60v1Ce.md @@ -0,0 +1 @@ +# Sampling Parameters \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/self-consistency-prompting@1EzqCoplXPiHjp9Z-vqn-.md b/src/data/roadmaps/prompt-engineering/content/self-consistency-prompting@1EzqCoplXPiHjp9Z-vqn-.md new file mode 100644 index 000000000..3ca60a85d --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/self-consistency-prompting@1EzqCoplXPiHjp9Z-vqn-.md @@ -0,0 +1 @@ +# Self-Consistency Prompting \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/step-back-prompting@2MboHh8ugkoH8dSd9d4Mk.md b/src/data/roadmaps/prompt-engineering/content/step-back-prompting@2MboHh8ugkoH8dSd9d4Mk.md new file mode 100644 index 
000000000..a786a6952 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/step-back-prompting@2MboHh8ugkoH8dSd9d4Mk.md @@ -0,0 +1 @@ +# Step-back Prompting \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/stop-sequences@v3CylRlojeltcwnE76j8Q.md b/src/data/roadmaps/prompt-engineering/content/stop-sequences@v3CylRlojeltcwnE76j8Q.md new file mode 100644 index 000000000..cf61ab5a3 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/stop-sequences@v3CylRlojeltcwnE76j8Q.md @@ -0,0 +1 @@ +# Stop Sequences \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/structured-outputs@j-PWO-ZmF9Oi9A5bwMRto.md b/src/data/roadmaps/prompt-engineering/content/structured-outputs@j-PWO-ZmF9Oi9A5bwMRto.md new file mode 100644 index 000000000..90dd31221 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/structured-outputs@j-PWO-ZmF9Oi9A5bwMRto.md @@ -0,0 +1 @@ +# Structured Outputs \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/system-prompting@fWo39-hehRgwmx7CF36mM.md b/src/data/roadmaps/prompt-engineering/content/system-prompting@fWo39-hehRgwmx7CF36mM.md new file mode 100644 index 000000000..a8f1b5205 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/system-prompting@fWo39-hehRgwmx7CF36mM.md @@ -0,0 +1 @@ +# System Prompting \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/temperature@iMwg-I76-Tg5dhu8DGO6U.md b/src/data/roadmaps/prompt-engineering/content/temperature@iMwg-I76-Tg5dhu8DGO6U.md new file mode 100644 index 000000000..9028fe2d0 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/temperature@iMwg-I76-Tg5dhu8DGO6U.md @@ -0,0 +1 @@ +# Temperature \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/tokens@NPcaSEteeEA5g22wQ7nL_.md b/src/data/roadmaps/prompt-engineering/content/tokens@NPcaSEteeEA5g22wQ7nL_.md new file mode 100644 index 
000000000..7e1c51cd3 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/tokens@NPcaSEteeEA5g22wQ7nL_.md @@ -0,0 +1 @@ +# Tokens \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/top-k@FF8ai1v5GDzxXLQhpwuPj.md b/src/data/roadmaps/prompt-engineering/content/top-k@FF8ai1v5GDzxXLQhpwuPj.md new file mode 100644 index 000000000..72cfa3ce7 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/top-k@FF8ai1v5GDzxXLQhpwuPj.md @@ -0,0 +1 @@ +# Top-K \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/top-p@-G1U1jDN5st1fTUtQmMl1.md b/src/data/roadmaps/prompt-engineering/content/top-p@-G1U1jDN5st1fTUtQmMl1.md new file mode 100644 index 000000000..4c3a6d7c9 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/top-p@-G1U1jDN5st1fTUtQmMl1.md @@ -0,0 +1 @@ +# Top-P \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/tree-of-thoughts-tot-prompting@ob9D0W9B9145Da64nbi1M.md b/src/data/roadmaps/prompt-engineering/content/tree-of-thoughts-tot-prompting@ob9D0W9B9145Da64nbi1M.md new file mode 100644 index 000000000..c4f4d214d --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/tree-of-thoughts-tot-prompting@ob9D0W9B9145Da64nbi1M.md @@ -0,0 +1 @@ +# Tree of Thoughts (ToT) Prompting \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/what-is-a-prompt@i4ijY3T5gLgNz0XqRipXe.md b/src/data/roadmaps/prompt-engineering/content/what-is-a-prompt@i4ijY3T5gLgNz0XqRipXe.md new file mode 100644 index 000000000..9b0abdf72 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/what-is-a-prompt@i4ijY3T5gLgNz0XqRipXe.md @@ -0,0 +1 @@ +# What is a Prompt? 
\ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/what-is-prompt-engineering@43drPbTwPqJQPyzwYUdBT.md b/src/data/roadmaps/prompt-engineering/content/what-is-prompt-engineering@43drPbTwPqJQPyzwYUdBT.md new file mode 100644 index 000000000..04d033aca --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/what-is-prompt-engineering@43drPbTwPqJQPyzwYUdBT.md @@ -0,0 +1 @@ +# What is Prompt Engineering? \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/xai@3wshuH7_DXgbhxsLzzI4D.md b/src/data/roadmaps/prompt-engineering/content/xai@3wshuH7_DXgbhxsLzzI4D.md new file mode 100644 index 000000000..12fb209fc --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/xai@3wshuH7_DXgbhxsLzzI4D.md @@ -0,0 +1 @@ +# xAI \ No newline at end of file diff --git a/src/data/roadmaps/prompt-engineering/content/zero-shot-prompting@GRerL9UXN73TwpCW2eTIE.md b/src/data/roadmaps/prompt-engineering/content/zero-shot-prompting@GRerL9UXN73TwpCW2eTIE.md new file mode 100644 index 000000000..65b4930d5 --- /dev/null +++ b/src/data/roadmaps/prompt-engineering/content/zero-shot-prompting@GRerL9UXN73TwpCW2eTIE.md @@ -0,0 +1 @@ +# Zero-Shot Prompting \ No newline at end of file