Python script to extract text from PDF with images







I have the following Python script. Its purpose is to extract text from PDFs. I use textract for that because I soon realized there is no easy way to check whether a page contains an image or not, so I simply extract the whole text with textract.



The workflow is like this: main() goes through each PDF file in a folder, I extract the text, search for keyword strikes, and then export the result to a CSV file inside the folder output_results.



What can I fix in my logic? What can I change in my code? I find it messy; how can I clean it up?



import textract
import os
import csv

class PdfMiner():

    path = os.getcwd() + '/folderForPdf/'
    output_path = os.getcwd() + '/output_results/'

    def __init__(self):
        pass

    def main(self):
        for self.filename in os.listdir(self.path):
            self.text = (PdfMiner().extract_text_from_pdf(self.path + self.filename))
            self.keyword_strike_dict = PdfMiner().keyword_strike(self.text)
            if bool(self.keyword_strike_dict):
                PdfMiner().output_to_csv(self.filename, self.keyword_strike_dict)

    def keyword_strike(self, text, keyword_strike_dict={}):
        '''keyword_strike counts how many times a specific keyword occurs'''
        self.keyword_strike_dict = {}
        self.text = text
        self.keywords_list = PdfMiner().extract_keywords()
        for keyword in self.keywords_list:
            if keyword in text.decode('utf-8'):
                self.keyword_strike_dict[keyword] = text.decode('utf-8').count(keyword)
        return self.keyword_strike_dict

    def extract_keywords(self, keywords_list=None):
        '''extract_keywords reads the keywords from the file keywords.txt into a list'''
        keywords_list = []
        with open('keywords.txt', 'r', encoding='utf8') as keywords_file:
            for keyword in keywords_file:
                keywords_list.append(keyword.strip('\n'))
        return keywords_list

    def extract_text_from_pdf(self, file_destination, text=None):
        '''extract_text_from_pdf'''
        self.file_destination = file_destination
        text = textract.process(self.file_destination, method='tesseract', language='eng', encoding='utf-8')
        return text

    def output_to_csv(self, *args, **kwargs):
        '''output_to_csv exports results to csv'''
        self.filename = args[0]
        self.keyword_strike_dict = args[1]
        self.output_file_path = PdfMiner().output_path + self.filename.strip('.pdf')
        with open(self.output_file_path + '.csv', 'w+', newline='') as csvfile:
            row_writer = csv.writer(csvfile, delimiter=',')
            row_writer.writerow(['keyword', 'keyword_count'])
            for keyword, keyword_count in self.keyword_strike_dict.items():
                print(keyword, keyword_count)
                row_writer.writerow([keyword, keyword_count])

if __name__ == "__main__":
    PdfMiner().main()









      python python-3.x pdf






asked May 20 at 9:47 by Iakovos Belonias, edited May 20 at 18:07 by 200_success


1 Answer

          I don't see a good reason why this should be a class. You only have two things in your state, self.text, which you could pass as an argument, and self.path, self.output_path, which I would also pass as arguments, maybe with a default value.



          Also, you are probably using classes wrong if your class has a main method that needs to instantiate new instances of the class on the fly.



          Your algorithm is not very efficient. You need to run over the whole text twice for each keyword. Once to check if it is in there and then again to count it. The former is obviously redundant, since str.count will just return 0 if the value is not present.
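
For example, a small illustrative sketch (the text and keywords here are made up):

# str.count already returns 0 for keywords that are not present,
# so a separate membership check is unnecessary.
text = "spam ham spam eggs"
keywords = ["spam", "bacon"]

counts = {keyword: text.count(keyword) for keyword in keywords}
print(counts)  # {'spam': 2, 'bacon': 0}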



          However, what would be a better algorithm is to first extract all the words (for example using a regex filtering only letters) and then count the number of times each word occurs using a collections.Counter, optionally filtering it down to only those words which are keywords. It even has a most_common method, so your file will be ordered by number of occurrences, descending.
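
A minimal sketch of that idea, with made-up words and keywords:

from collections import Counter

words = ["spam", "ham", "spam", "eggs", "spam", "ham"]
keywords = {"spam", "ham"}

# Count only the words we care about; most_common() sorts by count, descending.
counts = Counter(word for word in words if word in keywords)
print(counts.most_common())  # [('spam', 3), ('ham', 2)]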



          Instead of mucking around with os.getcwd() and os.listdir, I would recommend to use the (Python 3) pathlib.Path object. It supports globbing (to get all files matching a pattern), chaining them to get a new path and even replacing the extension with a different one.
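
For illustration, here is roughly how those pathlib features look (the folder names are just the ones from the question):

from pathlib import Path

for pdf in Path("folderForPdf").glob("*.pdf"):   # only files matching the pattern
    csv_name = pdf.with_suffix(".csv").name      # replace the extension, keep only the file name
    output = Path("output_results") / csv_name   # chain paths with the / operator
    print(pdf, "->", output)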



          When reading the keywords, you can use a simple list comprehension. Or, even better, a set comprehension, which gives you fast "keyword in keywords" lookups essentially for free.
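
As a quick sketch (assuming the keywords.txt file from the question exists):

# A set comprehension: duplicates collapse and membership tests are fast.
with open("keywords.txt", encoding="utf8") as f:
    keywords = {line.strip() for line in f}

print("python" in keywords)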



          line.strip() and line.strip("\n") are probably doing the same thing, unless you really want to preserve the spaces at the end of words.



          At the same time, doing self.filename.strip('.pdf') is a bit dangerous. It removes all characters given, until none of the characters is found anymore. For example, "some_file_name_fdp.pdf" will be reduced to "some_file_name_".
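
To see the difference (illustrative only), compare str.strip with a suffix-aware alternative such as pathlib:

from pathlib import Path

name = "some_file_name_fdp.pdf"

print(name.strip(".pdf"))              # some_file_name_   (strips characters, not a suffix)
print(Path(name).stem)                 # some_file_name_fdp
print(Path(name).with_suffix(".csv"))  # some_file_name_fdp.csv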



          The csv.writer has a writerows method that takes an iterable of rows. This way you can avoid a for loop.
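
A tiny sketch of that (the file name and data are just examples; the column names are the ones from the question):

import csv
from collections import Counter

counts = Counter({"spam": 3, "ham": 2})  # example data

with open("example.csv", "w", newline="") as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(["keyword", "keyword_count"])
    writer.writerows(counts.most_common())  # one call instead of an explicit loop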



          I would ensure to run only over PDF files, otherwise you will get some errors if a non-PDF file manages to sneak into your folder.



          I have done all of this in the following code (not tested, since I don't have textract installed ATM):



from collections import Counter
import csv
from pathlib import Path
import re
import textract

def extract_text(file_name):
    # OCR the PDF and decode the returned bytes into a str
    return textract.process(file_name, method='tesseract', language='eng',
                            encoding='utf-8').decode('utf-8')

def extract_words(text):
    # Keep only runs of letters
    return re.findall(r'([a-zA-Z]+)', text)

def count_keywords(words, keywords):
    return Counter(word for word in words if word in keywords)

def read_keywords(file_name):
    with open(file_name) as f:
        return {line.strip() for line in f}

def save_keywords(file_name, keywords):
    with open(file_name, "w", newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        writer.writerow(['keyword', 'keyword_count'])
        writer.writerows(keywords.most_common())

def main():
    output_folder = Path("output_results")
    keywords = read_keywords('keywords.txt')

    for f in Path("folderForPdf").glob("*.pdf"):
        words = extract_words(extract_text(f))
        keyword_counts = count_keywords(words, keywords)
        # Use .name so the CSV ends up directly inside output_results
        save_keywords(output_folder / f.with_suffix(".csv").name, keyword_counts)

if __name__ == "__main__":
    main()





answered May 20 at 10:30 by Graipher, edited May 20 at 14:24













          • Wow, amazing job, thank you very much. I see most of my code is pointless. – Iakovos Belonias, May 20 at 10:33






          • @IakovosBelonias Not pointless (it worked before, didn't it?), just a bit too verbose, maybe ;) – Graipher, May 20 at 10:35








          • Thank you very much. – Iakovos Belonias, May 20 at 10:36










          • Should I use regex, considering that I don't care too much about extracting the text but mostly about counting the number of occurrences? – Iakovos Belonias, May 20 at 10:48










          • @IakovosBelonias Well, with this approach you need to find the words of the text first in order to count them. That is one advantage of your approach, at the cost of performance and false positives in case of partial matches with the keyword. – Graipher, May 20 at 10:52
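
To illustrate the trade-off described in the last comment (the text and keyword are made up): substring counting picks up partial matches such as "cat" inside "category", while word-based counting does not.

import re
from collections import Counter

text = "category cat concatenate cat"
keyword = "cat"

# Substring counting (the question's approach): partial matches inflate the count.
print(text.count(keyword))  # 4

# Word-based counting (the answer's approach): only whole words are counted.
words = re.findall(r"[a-zA-Z]+", text)
print(Counter(words)[keyword])  # 2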













